2024-11-20 13:23:12,604 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-20 13:23:12,668 main DEBUG Took 0.056701 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-20 13:23:12,668 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-20 13:23:12,669 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-20 13:23:12,670 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-20 13:23:12,678 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 13:23:12,695 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-20 13:23:12,743 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 13:23:12,752 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 13:23:12,753 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 13:23:12,754 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 13:23:12,764 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 13:23:12,764 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 13:23:12,766 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 13:23:12,766 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 13:23:12,767 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 13:23:12,767 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 13:23:12,769 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 13:23:12,769 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 13:23:12,770 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 13:23:12,770 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-20 13:23:12,771 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 13:23:12,771 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 13:23:12,775 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 13:23:12,777 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 13:23:12,778 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 13:23:12,778 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 13:23:12,782 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 13:23:12,782 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 13:23:12,788 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 13:23:12,789 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 13:23:12,789 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 13:23:12,790 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-20 13:23:12,796 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 13:23:12,804 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-20 13:23:12,806 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-20 13:23:12,807 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-20 13:23:12,817 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-20 13:23:12,818 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-20 13:23:12,848 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-20 13:23:12,857 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-20 13:23:12,864 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-20 13:23:12,865 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-20 13:23:12,865 main DEBUG createAppenders(={Console}) 2024-11-20 13:23:12,866 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-20 13:23:12,867 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-20 13:23:12,867 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-20 13:23:12,889 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-20 13:23:12,889 main DEBUG OutputStream closed 2024-11-20 13:23:12,890 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-20 13:23:12,890 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-20 13:23:12,891 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-20 13:23:13,298 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-20 13:23:13,323 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-20 13:23:13,337 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-20 13:23:13,339 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-20 13:23:13,339 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-20 13:23:13,364 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-20 13:23:13,377 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-20 13:23:13,377 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-20 13:23:13,378 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-20 13:23:13,378 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-20 13:23:13,378 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-20 13:23:13,379 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-20 13:23:13,379 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-20 13:23:13,380 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-20 13:23:13,380 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-20 13:23:13,380 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-20 13:23:13,381 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-20 13:23:13,382 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-20 13:23:13,434 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-20 13:23:13,436 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-20 13:23:13,439 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-20 13:23:13,440 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-20T13:23:14,391 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298 2024-11-20 13:23:14,396 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-20 13:23:14,396 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-20T13:23:14,410 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-11-20T13:23:14,489 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T13:23:14,509 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/cluster_5e1ba8bd-fd63-38a7-797d-d702e73186e3, deleteOnExit=true 2024-11-20T13:23:14,510 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-20T13:23:14,511 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/test.cache.data in system properties and HBase conf 2024-11-20T13:23:14,521 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T13:23:14,522 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/hadoop.log.dir in system properties and HBase conf 2024-11-20T13:23:14,529 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T13:23:14,530 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T13:23:14,530 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-20T13:23:14,952 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-20T13:23:15,408 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-20T13:23:15,427 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T13:23:15,428 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T13:23:15,430 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T13:23:15,432 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T13:23:15,443 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T13:23:15,443 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T13:23:15,444 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T13:23:15,444 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T13:23:15,445 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T13:23:15,446 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/nfs.dump.dir in system properties and HBase conf 2024-11-20T13:23:15,446 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/java.io.tmpdir in system properties and HBase conf 2024-11-20T13:23:15,447 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T13:23:15,455 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T13:23:15,456 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T13:23:18,437 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-20T13:23:18,722 INFO [Time-limited test {}] log.Log(170): Logging initialized @9160ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-20T13:23:18,929 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T13:23:19,132 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T13:23:19,314 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T13:23:19,315 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T13:23:19,324 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T13:23:19,384 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T13:23:19,404 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61767546{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/hadoop.log.dir/,AVAILABLE} 2024-11-20T13:23:19,406 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3db7010a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T13:23:19,993 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@54429d0a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/java.io.tmpdir/jetty-localhost-39473-hadoop-hdfs-3_4_1-tests_jar-_-any-16575621703843640249/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T13:23:20,013 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@49566914{HTTP/1.1, (http/1.1)}{localhost:39473} 2024-11-20T13:23:20,015 INFO [Time-limited test {}] server.Server(415): Started @10453ms 2024-11-20T13:23:21,142 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T13:23:21,180 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T13:23:21,212 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T13:23:21,212 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T13:23:21,213 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T13:23:21,225 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54e7f8ba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/hadoop.log.dir/,AVAILABLE} 2024-11-20T13:23:21,226 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ab13c09{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T13:23:21,522 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4b45b1ce{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/java.io.tmpdir/jetty-localhost-46247-hadoop-hdfs-3_4_1-tests_jar-_-any-8420186353601128149/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T13:23:21,536 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a4ec7f0{HTTP/1.1, (http/1.1)}{localhost:46247} 2024-11-20T13:23:21,537 INFO [Time-limited test {}] server.Server(415): Started @11976ms 2024-11-20T13:23:21,677 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T13:23:23,284 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/cluster_5e1ba8bd-fd63-38a7-797d-d702e73186e3/dfs/data/data1/current/BP-1593953133-172.17.0.2-1732108997715/current, will proceed with Du for space computation calculation, 2024-11-20T13:23:23,288 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/cluster_5e1ba8bd-fd63-38a7-797d-d702e73186e3/dfs/data/data2/current/BP-1593953133-172.17.0.2-1732108997715/current, will proceed with Du for space computation calculation, 2024-11-20T13:23:23,461 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T13:23:23,621 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x94f9c06eab35b615 with lease ID 0x474fb08ec68a6f0d: Processing first storage report for DS-d8cd8742-a148-4191-aafe-461b3c89aca5 from datanode DatanodeRegistration(127.0.0.1:34799, datanodeUuid=0c8e8961-1a10-417f-bcf5-fd12b33608bf, infoPort=46721, infoSecurePort=0, ipcPort=35487, storageInfo=lv=-57;cid=testClusterID;nsid=544414496;c=1732108997715) 2024-11-20T13:23:23,623 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x94f9c06eab35b615 with lease ID 0x474fb08ec68a6f0d: from storage DS-d8cd8742-a148-4191-aafe-461b3c89aca5 node DatanodeRegistration(127.0.0.1:34799, datanodeUuid=0c8e8961-1a10-417f-bcf5-fd12b33608bf, infoPort=46721, infoSecurePort=0, ipcPort=35487, storageInfo=lv=-57;cid=testClusterID;nsid=544414496;c=1732108997715), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-20T13:23:23,624 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x94f9c06eab35b615 with lease ID 0x474fb08ec68a6f0d: Processing first storage report for DS-11efb4a9-3d77-44cc-8747-261898293e09 from datanode DatanodeRegistration(127.0.0.1:34799, datanodeUuid=0c8e8961-1a10-417f-bcf5-fd12b33608bf, infoPort=46721, infoSecurePort=0, ipcPort=35487, storageInfo=lv=-57;cid=testClusterID;nsid=544414496;c=1732108997715) 2024-11-20T13:23:23,625 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x94f9c06eab35b615 with lease ID 0x474fb08ec68a6f0d: from storage DS-11efb4a9-3d77-44cc-8747-261898293e09 node DatanodeRegistration(127.0.0.1:34799, datanodeUuid=0c8e8961-1a10-417f-bcf5-fd12b33608bf, infoPort=46721, infoSecurePort=0, ipcPort=35487, storageInfo=lv=-57;cid=testClusterID;nsid=544414496;c=1732108997715), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T13:23:23,662 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298 2024-11-20T13:23:23,851 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/cluster_5e1ba8bd-fd63-38a7-797d-d702e73186e3/zookeeper_0, clientPort=53074, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/cluster_5e1ba8bd-fd63-38a7-797d-d702e73186e3/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/cluster_5e1ba8bd-fd63-38a7-797d-d702e73186e3/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T13:23:23,890 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=53074 2024-11-20T13:23:23,914 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T13:23:23,920 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T13:23:24,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741825_1001 (size=7) 2024-11-20T13:23:24,418 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc with version=8 2024-11-20T13:23:24,421 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/hbase-staging 2024-11-20T13:23:24,696 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-20T13:23:25,410 INFO [Time-limited test {}] client.ConnectionUtils(129): master/5ef453f0fbb6:0 server-side Connection retries=45 2024-11-20T13:23:25,447 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T13:23:25,448 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T13:23:25,448 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T13:23:25,449 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T13:23:25,449 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, 
handlerCount=1 2024-11-20T13:23:25,661 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T13:23:25,756 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-20T13:23:25,770 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-20T13:23:25,776 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T13:23:25,818 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 22044 (auto-detected) 2024-11-20T13:23:25,820 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-20T13:23:25,849 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:43647 2024-11-20T13:23:25,862 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T13:23:25,866 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T13:23:25,885 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:43647 connecting to ZooKeeper ensemble=127.0.0.1:53074 2024-11-20T13:23:25,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:436470x0, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T13:23:25,949 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43647-0x1001519a4e30000 connected 2024-11-20T13:23:26,001 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T13:23:26,006 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T13:23:26,010 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T13:23:26,016 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43647 2024-11-20T13:23:26,016 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43647 2024-11-20T13:23:26,017 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43647 2024-11-20T13:23:26,020 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43647 2024-11-20T13:23:26,021 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): 
Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43647 2024-11-20T13:23:26,031 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc, hbase.cluster.distributed=false 2024-11-20T13:23:26,139 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/5ef453f0fbb6:0 server-side Connection retries=45 2024-11-20T13:23:26,139 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T13:23:26,139 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T13:23:26,140 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T13:23:26,140 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T13:23:26,140 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T13:23:26,142 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T13:23:26,145 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T13:23:26,146 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:46739 2024-11-20T13:23:26,148 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T13:23:26,155 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T13:23:26,157 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T13:23:26,160 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T13:23:26,163 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:46739 connecting to ZooKeeper ensemble=127.0.0.1:53074 2024-11-20T13:23:26,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:467390x0, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T13:23:26,168 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:467390x0, quorum=127.0.0.1:53074, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T13:23:26,169 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46739-0x1001519a4e30001 connected 2024-11-20T13:23:26,170 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T13:23:26,171 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T13:23:26,172 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46739 2024-11-20T13:23:26,173 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46739 2024-11-20T13:23:26,173 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46739 2024-11-20T13:23:26,174 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46739 2024-11-20T13:23:26,175 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46739 2024-11-20T13:23:26,179 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/5ef453f0fbb6,43647,1732109004684 2024-11-20T13:23:26,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T13:23:26,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T13:23:26,197 DEBUG [M:0;5ef453f0fbb6:43647 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ef453f0fbb6:43647 2024-11-20T13:23:26,201 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5ef453f0fbb6,43647,1732109004684 2024-11-20T13:23:26,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T13:23:26,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T13:23:26,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T13:23:26,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T13:23:26,236 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Set watcher on existing 
znode=/hbase/master 2024-11-20T13:23:26,237 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T13:23:26,238 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5ef453f0fbb6,43647,1732109004684 from backup master directory 2024-11-20T13:23:26,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5ef453f0fbb6,43647,1732109004684 2024-11-20T13:23:26,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T13:23:26,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T13:23:26,243 WARN [master/5ef453f0fbb6:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T13:23:26,243 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ef453f0fbb6,43647,1732109004684 2024-11-20T13:23:26,246 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-20T13:23:26,248 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-20T13:23:26,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741826_1002 (size=42) 2024-11-20T13:23:26,747 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/hbase.id with ID: bc029da1-60d3-47d3-b6b4-85b78192e2e8 2024-11-20T13:23:26,809 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T13:23:26,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T13:23:26,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T13:23:26,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741827_1003 (size=196) 2024-11-20T13:23:27,313 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T13:23:27,316 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T13:23:27,341 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] 
at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:27,347 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T13:23:27,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741828_1004 (size=1189) 2024-11-20T13:23:27,423 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/data/master/store 2024-11-20T13:23:27,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741829_1005 (size=34) 2024-11-20T13:23:27,471 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-20T13:23:27,471 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:23:27,473 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T13:23:27,473 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T13:23:27,473 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T13:23:27,473 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T13:23:27,474 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T13:23:27,474 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T13:23:27,474 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-20T13:23:27,477 WARN [master/5ef453f0fbb6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/data/master/store/.initializing 2024-11-20T13:23:27,477 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/WALs/5ef453f0fbb6,43647,1732109004684 2024-11-20T13:23:27,489 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-20T13:23:27,505 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ef453f0fbb6%2C43647%2C1732109004684, suffix=, logDir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/WALs/5ef453f0fbb6,43647,1732109004684, archiveDir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/oldWALs, maxLogs=10 2024-11-20T13:23:27,537 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/WALs/5ef453f0fbb6,43647,1732109004684/5ef453f0fbb6%2C43647%2C1732109004684.1732109007512, exclude list is [], retry=0 2024-11-20T13:23:27,569 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34799,DS-d8cd8742-a148-4191-aafe-461b3c89aca5,DISK] 2024-11-20T13:23:27,573 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-20T13:23:27,630 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/WALs/5ef453f0fbb6,43647,1732109004684/5ef453f0fbb6%2C43647%2C1732109004684.1732109007512 2024-11-20T13:23:27,631 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46721:46721)] 2024-11-20T13:23:27,632 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T13:23:27,633 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:23:27,641 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T13:23:27,642 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T13:23:27,705 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T13:23:27,740 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T13:23:27,746 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:27,749 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T13:23:27,750 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T13:23:27,762 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T13:23:27,763 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:27,765 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:23:27,765 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T13:23:27,771 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T13:23:27,772 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:27,773 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:23:27,773 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T13:23:27,777 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T13:23:27,777 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:27,779 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:23:27,784 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T13:23:27,788 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T13:23:27,802 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T13:23:27,812 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T13:23:27,825 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T13:23:27,828 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75086015, jitterRate=0.11886881291866302}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T13:23:27,836 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-20T13:23:27,840 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T13:23:27,889 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61a883fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:23:27,941 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
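Two numbers in the entries above can be checked directly from the log itself. The split policy's desiredMaxFileSize is a base split size with the logged jitterRate applied, so dividing 75086015 by (1 + 0.11886881...) gives an implied base of about 67108864 bytes (64 MB). The FlushLargeStoresPolicy lower bound falls back to the region's flush size divided by its number of column families: 134217728 / 4 families (info, proc, rs, state) = 33554432, matching the "(32.0 M)" message. A minimal sketch of both calculations; the 64 MB base is derived from the logged values rather than read from configuration.

```java
// Back-of-the-envelope check of two values from the log entries above, plain Java only.
public class MasterStoreNumbersSketch {
    public static void main(String[] args) {
        // ConstantSizeRegionSplitPolicy: desiredMaxFileSize = baseSize * (1 + jitterRate),
        // so the implied base is desiredMaxFileSize / (1 + jitterRate).
        long desiredMaxFileSize = 75086015L;          // from the log
        double jitterRate = 0.11886881291866302;      // from the log
        double impliedBase = desiredMaxFileSize / (1.0 + jitterRate);
        System.out.printf("implied base split size ~= %.0f bytes (~64 MB)%n", impliedBase);

        // FlushLargeStoresPolicy fallback: flush size divided by the number of column
        // families, used because hbase.hregion.percolumnfamilyflush.size.lower.bound is unset.
        long flushSize = 134217728L;                  // 128 MB, from MasterRegionFlusherAndCompactor
        int columnFamilies = 4;                       // info, proc, rs, state opened above
        System.out.println(flushSize / columnFamilies);  // 33554432 = 32 MB, the logged lower bound
    }
}
```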
2024-11-20T13:23:27,959 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T13:23:27,959 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T13:23:27,963 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T13:23:27,965 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 2 msec 2024-11-20T13:23:27,972 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 6 msec 2024-11-20T13:23:27,972 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T13:23:28,022 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T13:23:28,044 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T13:23:28,050 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-20T13:23:28,054 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T13:23:28,056 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T13:23:28,058 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-20T13:23:28,061 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T13:23:28,066 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T13:23:28,070 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-20T13:23:28,073 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T13:23:28,075 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T13:23:28,096 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T13:23:28,098 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T13:23:28,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T13:23:28,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T13:23:28,107 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=5ef453f0fbb6,43647,1732109004684, sessionid=0x1001519a4e30000, setting cluster-up flag (Was=false) 2024-11-20T13:23:28,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T13:23:28,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T13:23:28,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T13:23:28,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T13:23:28,138 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T13:23:28,141 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ef453f0fbb6,43647,1732109004684 2024-11-20T13:23:28,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T13:23:28,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T13:23:28,158 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T13:23:28,161 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ef453f0fbb6,43647,1732109004684 2024-11-20T13:23:28,206 DEBUG [RS:0;5ef453f0fbb6:46739 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ef453f0fbb6:46739 2024-11-20T13:23:28,215 INFO 
[RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(1008): ClusterId : bc029da1-60d3-47d3-b6b4-85b78192e2e8 2024-11-20T13:23:28,219 DEBUG [RS:0;5ef453f0fbb6:46739 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T13:23:28,237 DEBUG [RS:0;5ef453f0fbb6:46739 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T13:23:28,238 DEBUG [RS:0;5ef453f0fbb6:46739 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T13:23:28,251 DEBUG [RS:0;5ef453f0fbb6:46739 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T13:23:28,252 DEBUG [RS:0;5ef453f0fbb6:46739 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ac843ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:23:28,260 DEBUG [RS:0;5ef453f0fbb6:46739 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47849324, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ef453f0fbb6/172.17.0.2:0 2024-11-20T13:23:28,264 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-20T13:23:28,265 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-20T13:23:28,265 DEBUG [RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-11-20T13:23:28,271 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(3073): reportForDuty to master=5ef453f0fbb6,43647,1732109004684 with isa=5ef453f0fbb6/172.17.0.2:46739, startcode=1732109006137 2024-11-20T13:23:28,290 DEBUG [RS:0;5ef453f0fbb6:46739 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T13:23:28,299 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-11-20T13:23:28,307 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-20T13:23:28,312 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
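The StochasticLoadBalancer entry above lists its cost functions and reports "sum of multiplier of cost functions = 0.0" (no servers have reported in yet). The general idea is a weighted sum: each cost function yields a normalized cost and contributes with a multiplier. The sketch below illustrates only that idea; the multiplier and cost values are hypothetical, and only the function names are taken from the log.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Minimal sketch of a weighted-cost sum as suggested by the balancer entry above.
// Values are hypothetical; a multiplier sum of 0.0 (as logged) means nothing is active yet.
public class WeightedCostSketch {
    public static void main(String[] args) {
        Map<String, double[]> costs = new LinkedHashMap<>();  // name -> {multiplier, cost in [0,1]}
        costs.put("RegionCountSkewCostFunction", new double[]{500, 0.10});
        costs.put("MoveCostFunction",            new double[]{  7, 0.02});
        costs.put("MemStoreSizeCostFunction",    new double[]{  5, 0.30});

        double weighted = 0, multipliers = 0;
        for (double[] v : costs.values()) {
            weighted += v[0] * v[1];
            multipliers += v[0];
        }
        // Guard against the all-zero case seen in the log before servers report in.
        double overall = multipliers == 0 ? 0 : weighted / multipliers;
        System.out.printf("sum of multipliers=%.1f, overall cost=%.4f%n", multipliers, overall);
    }
}
```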
2024-11-20T13:23:28,320 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ef453f0fbb6,43647,1732109004684 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T13:23:28,326 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ef453f0fbb6:0, corePoolSize=5, maxPoolSize=5 2024-11-20T13:23:28,326 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ef453f0fbb6:0, corePoolSize=5, maxPoolSize=5 2024-11-20T13:23:28,326 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ef453f0fbb6:0, corePoolSize=5, maxPoolSize=5 2024-11-20T13:23:28,327 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ef453f0fbb6:0, corePoolSize=5, maxPoolSize=5 2024-11-20T13:23:28,327 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ef453f0fbb6:0, corePoolSize=10, maxPoolSize=10 2024-11-20T13:23:28,327 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ef453f0fbb6:0, corePoolSize=1, maxPoolSize=1 2024-11-20T13:23:28,327 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ef453f0fbb6:0, corePoolSize=2, maxPoolSize=2 2024-11-20T13:23:28,328 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ef453f0fbb6:0, corePoolSize=1, maxPoolSize=1 2024-11-20T13:23:28,341 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58611, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T13:23:28,346 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-20T13:23:28,346 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-20T13:23:28,343 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732109038343 2024-11-20T13:23:28,350 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T13:23:28,351 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T13:23:28,356 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43647 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at 
org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:28,357 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T13:23:28,358 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T13:23:28,360 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T13:23:28,360 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T13:23:28,365 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T13:23:28,368 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:28,369 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T13:23:28,374 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T13:23:28,376 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 
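The FSTableDescriptors entry above prints the hbase:meta descriptor being written: three column families (info, rep_barrier, table), all IN_MEMORY, ROWCOL bloom filters, ROW_INDEX_V1 data block encoding, 8 KB block size for info and table and 64 KB for rep_barrier. The sketch below builds a roughly equivalent descriptor with the public HBase 2.x client builder API; it is an approximation for illustration (coprocessor and store-file-tracker metadata omitted), not the code the master itself runs.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Rough equivalent of the hbase:meta descriptor logged above, using the HBase 2.x
// client API. Attribute values (VERSIONS, IN_MEMORY, BLOOMFILTER, encoding, BLOCKSIZE)
// mirror the log; this is illustrative only.
public class MetaDescriptorSketch {
    static ColumnFamilyDescriptorBuilder family(String name, int versions, int blockSize) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
                .setMaxVersions(versions)
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(blockSize);
    }

    public static void main(String[] args) {
        TableDescriptor meta = TableDescriptorBuilder.newBuilder(TableName.valueOf("hbase:meta"))
                .setColumnFamily(family("info", 3, 8 * 1024).build())
                .setColumnFamily(family("rep_barrier", Integer.MAX_VALUE, 64 * 1024).build())  // VERSIONS=2147483647
                .setColumnFamily(family("table", 3, 8 * 1024).build())
                .build();
        System.out.println(meta);
    }
}
```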
2024-11-20T13:23:28,377 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T13:23:28,380 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T13:23:28,381 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T13:23:28,384 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ef453f0fbb6:0:becomeActiveMaster-HFileCleaner.large.0-1732109008382,5,FailOnTimeoutGroup] 2024-11-20T13:23:28,384 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ef453f0fbb6:0:becomeActiveMaster-HFileCleaner.small.0-1732109008384,5,FailOnTimeoutGroup] 2024-11-20T13:23:28,384 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T13:23:28,385 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T13:23:28,386 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T13:23:28,386 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
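The ChoreService entries above enable periodic maintenance chores with fixed periods (LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms). A plain-JDK sketch of the same periodic-scheduling pattern follows; the periods come from the log, while the task bodies are hypothetical placeholders rather than HBase's cleaner implementations.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Plain-JDK sketch of the periodic-chore pattern from the ChoreService entries above.
public class ChoreSketch {
    public static void main(String[] args) {
        ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
        chores.scheduleAtFixedRate(() -> System.out.println("LogsCleaner tick"),
                0, 600_000, TimeUnit.MILLISECONDS);            // period=600000 ms, as logged
        chores.scheduleAtFixedRate(() -> System.out.println("SnapshotCleaner tick"),
                0, 1_800_000, TimeUnit.MILLISECONDS);           // period=1800000 ms, as logged
        chores.scheduleAtFixedRate(() -> System.out.println("ReplicationBarrierCleaner tick"),
                0, 43_200_000, TimeUnit.MILLISECONDS);          // period=43200000 ms, as logged
        // In a real service these would run until shutdown; shut down immediately here.
        chores.shutdown();
    }
}
```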
2024-11-20T13:23:28,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741831_1007 (size=1039) 2024-11-20T13:23:28,397 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-20T13:23:28,397 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc 2024-11-20T13:23:28,410 DEBUG [RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-11-20T13:23:28,410 WARN [RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 
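The pair of entries just above shows the region server's registration attempt being turned away ("Master is not running yet") and retried after a 100 ms sleep; a later reportForDuty entry shows the retry succeeding once the master is up. A minimal retry-loop sketch of that pattern, where reportForDuty() is a hypothetical stand-in for the real RPC:

```java
// Minimal sketch of the retry pattern in the reportForDuty entries above:
// attempt to register, and on "master not running yet" sleep briefly and retry.
public class ReportForDutyRetrySketch {
    private static int attempts = 0;

    // Hypothetical stand-in: pretend the master only accepts registration on the third attempt.
    static boolean reportForDuty() {
        return ++attempts >= 3;
    }

    public static void main(String[] args) throws InterruptedException {
        while (!reportForDuty()) {
            System.out.println("reportForDuty failed; sleeping 100 ms and then retrying.");
            Thread.sleep(100);   // matches the 100 ms back-off seen in the log
        }
        System.out.println("registered after " + attempts + " attempts");
    }
}
```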
2024-11-20T13:23:28,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741832_1008 (size=32) 2024-11-20T13:23:28,415 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:23:28,417 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T13:23:28,420 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T13:23:28,420 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:28,421 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T13:23:28,421 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T13:23:28,424 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T13:23:28,424 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:28,425 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-20T13:23:28,425 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T13:23:28,427 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T13:23:28,428 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:28,428 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T13:23:28,430 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740 2024-11-20T13:23:28,431 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740 2024-11-20T13:23:28,439 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-20T13:23:28,443 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-20T13:23:28,448 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T13:23:28,449 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63196040, jitterRate=-0.05830562114715576}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T13:23:28,451 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-20T13:23:28,451 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-20T13:23:28,451 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-20T13:23:28,452 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-20T13:23:28,452 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T13:23:28,452 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T13:23:28,454 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-20T13:23:28,454 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-20T13:23:28,457 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-20T13:23:28,457 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-20T13:23:28,464 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T13:23:28,473 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T13:23:28,475 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T13:23:28,511 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(3073): reportForDuty to master=5ef453f0fbb6,43647,1732109004684 with isa=5ef453f0fbb6/172.17.0.2:46739, startcode=1732109006137 2024-11-20T13:23:28,514 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43647 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:28,517 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43647 {}] master.ServerManager(486): Registering regionserver=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:28,531 DEBUG [RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc 2024-11-20T13:23:28,531 DEBUG 
[RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:40089 2024-11-20T13:23:28,531 DEBUG [RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-20T13:23:28,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T13:23:28,537 DEBUG [RS:0;5ef453f0fbb6:46739 {}] zookeeper.ZKUtil(111): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:28,537 WARN [RS:0;5ef453f0fbb6:46739 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T13:23:28,537 INFO [RS:0;5ef453f0fbb6:46739 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T13:23:28,537 DEBUG [RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/WALs/5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:28,540 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ef453f0fbb6,46739,1732109006137] 2024-11-20T13:23:28,551 DEBUG [RS:0;5ef453f0fbb6:46739 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-20T13:23:28,563 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T13:23:28,576 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T13:23:28,578 INFO [RS:0;5ef453f0fbb6:46739 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T13:23:28,579 INFO [RS:0;5ef453f0fbb6:46739 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T13:23:28,580 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-20T13:23:28,589 INFO [RS:0;5ef453f0fbb6:46739 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
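The MemStoreFlusher entry above reports globalMemStoreLimit=880 M with a low-water mark of 836 M; the low mark is 95% of the global limit (880 × 0.95 = 836), consistent with the usual hbase.regionserver.global.memstore.size.lower.limit default of 0.95. A quick plain-Java check in which the 0.95 ratio is the only assumption:

```java
// Quick check of the MemStoreFlusher numbers logged above. The 880 MB global limit is
// from the log; the 0.95 lower-limit ratio is the assumed default.
public class MemStoreLimitSketch {
    public static void main(String[] args) {
        long globalLimitMb = 880;                   // globalMemStoreLimit, from the log
        double lowerLimitRatio = 0.95;              // assumed default lower-limit ratio
        long lowMarkMb = (long) (globalLimitMb * lowerLimitRatio);
        System.out.println(lowMarkMb + " M");       // 836 M, as logged
    }
}
```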
2024-11-20T13:23:28,589 DEBUG [RS:0;5ef453f0fbb6:46739 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ef453f0fbb6:0, corePoolSize=1, maxPoolSize=1 2024-11-20T13:23:28,589 DEBUG [RS:0;5ef453f0fbb6:46739 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ef453f0fbb6:0, corePoolSize=1, maxPoolSize=1 2024-11-20T13:23:28,590 DEBUG [RS:0;5ef453f0fbb6:46739 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ef453f0fbb6:0, corePoolSize=1, maxPoolSize=1 2024-11-20T13:23:28,590 DEBUG [RS:0;5ef453f0fbb6:46739 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0, corePoolSize=1, maxPoolSize=1 2024-11-20T13:23:28,590 DEBUG [RS:0;5ef453f0fbb6:46739 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ef453f0fbb6:0, corePoolSize=1, maxPoolSize=1 2024-11-20T13:23:28,590 DEBUG [RS:0;5ef453f0fbb6:46739 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ef453f0fbb6:0, corePoolSize=2, maxPoolSize=2 2024-11-20T13:23:28,591 DEBUG [RS:0;5ef453f0fbb6:46739 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0, corePoolSize=1, maxPoolSize=1 2024-11-20T13:23:28,591 DEBUG [RS:0;5ef453f0fbb6:46739 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ef453f0fbb6:0, corePoolSize=1, maxPoolSize=1 2024-11-20T13:23:28,591 DEBUG [RS:0;5ef453f0fbb6:46739 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ef453f0fbb6:0, corePoolSize=1, maxPoolSize=1 2024-11-20T13:23:28,591 DEBUG [RS:0;5ef453f0fbb6:46739 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ef453f0fbb6:0, corePoolSize=1, maxPoolSize=1 2024-11-20T13:23:28,592 DEBUG [RS:0;5ef453f0fbb6:46739 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ef453f0fbb6:0, corePoolSize=1, maxPoolSize=1 2024-11-20T13:23:28,592 DEBUG [RS:0;5ef453f0fbb6:46739 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ef453f0fbb6:0, corePoolSize=3, maxPoolSize=3 2024-11-20T13:23:28,592 DEBUG [RS:0;5ef453f0fbb6:46739 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0, corePoolSize=3, maxPoolSize=3 2024-11-20T13:23:28,596 INFO [RS:0;5ef453f0fbb6:46739 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T13:23:28,596 INFO [RS:0;5ef453f0fbb6:46739 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T13:23:28,596 INFO [RS:0;5ef453f0fbb6:46739 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T13:23:28,596 INFO [RS:0;5ef453f0fbb6:46739 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T13:23:28,596 INFO [RS:0;5ef453f0fbb6:46739 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ef453f0fbb6,46739,1732109006137-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
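The executor-service entries above create bounded worker pools whose corePoolSize equals maxPoolSize (RS_OPEN_REGION with 1 thread, RS_SNAPSHOT_OPERATIONS and RS_FLUSH_OPERATIONS with 3, and so on). A plain-JDK sketch of that pattern; the pool names are taken from the log, while the thread-naming scheme and task bodies are simplifications, not HBase's own ExecutorService class.

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

// Plain-JDK sketch of the fixed-size, named worker pools listed in the log above.
public class RsExecutorPoolsSketch {
    static ExecutorService namedFixedPool(String name, int size) {
        AtomicInteger counter = new AtomicInteger();
        ThreadFactory factory = r -> new Thread(r, name + "-" + counter.incrementAndGet());
        return Executors.newFixedThreadPool(size, factory);   // core == max, as in the log
    }

    public static void main(String[] args) {
        ExecutorService openRegion = namedFixedPool("RS_OPEN_REGION", 1);
        ExecutorService flushOps = namedFixedPool("RS_FLUSH_OPERATIONS", 3);
        openRegion.submit(() -> System.out.println(Thread.currentThread().getName()));
        flushOps.submit(() -> System.out.println(Thread.currentThread().getName()));
        openRegion.shutdown();
        flushOps.shutdown();
    }
}
```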
2024-11-20T13:23:28,626 WARN [5ef453f0fbb6:43647 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 2024-11-20T13:23:28,629 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T13:23:28,631 INFO [RS:0;5ef453f0fbb6:46739 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ef453f0fbb6,46739,1732109006137-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T13:23:28,657 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.Replication(204): 5ef453f0fbb6,46739,1732109006137 started 2024-11-20T13:23:28,657 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(1767): Serving as 5ef453f0fbb6,46739,1732109006137, RpcServer on 5ef453f0fbb6/172.17.0.2:46739, sessionid=0x1001519a4e30001 2024-11-20T13:23:28,658 DEBUG [RS:0;5ef453f0fbb6:46739 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T13:23:28,658 DEBUG [RS:0;5ef453f0fbb6:46739 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:28,659 DEBUG [RS:0;5ef453f0fbb6:46739 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ef453f0fbb6,46739,1732109006137' 2024-11-20T13:23:28,659 DEBUG [RS:0;5ef453f0fbb6:46739 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T13:23:28,661 DEBUG [RS:0;5ef453f0fbb6:46739 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T13:23:28,663 DEBUG [RS:0;5ef453f0fbb6:46739 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T13:23:28,664 DEBUG [RS:0;5ef453f0fbb6:46739 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T13:23:28,664 DEBUG [RS:0;5ef453f0fbb6:46739 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:28,664 DEBUG [RS:0;5ef453f0fbb6:46739 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ef453f0fbb6,46739,1732109006137' 2024-11-20T13:23:28,664 DEBUG [RS:0;5ef453f0fbb6:46739 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T13:23:28,665 DEBUG [RS:0;5ef453f0fbb6:46739 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T13:23:28,666 DEBUG [RS:0;5ef453f0fbb6:46739 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T13:23:28,667 INFO [RS:0;5ef453f0fbb6:46739 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T13:23:28,667 INFO [RS:0;5ef453f0fbb6:46739 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
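The procedure-member entries above describe the region server checking for aborted procedures under /hbase/flush-table-proc/abort and /hbase/online-snapshot/abort and looking for new ones under the corresponding acquired znodes. The sketch below shows those znode reads with the plain ZooKeeper client; the quorum address is the one from the log, and watch re-registration and real error handling are omitted, so treat it as illustrative only.

```java
import java.util.List;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

// Illustrative znode checks matching the procedure-member entries above.
public class ProcedureMemberSketch {
    public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:53074", 30_000, event -> {});  // quorum from the log
        try {
            for (String root : new String[]{"/hbase/flush-table-proc", "/hbase/online-snapshot"}) {
                List<String> aborted = zk.getChildren(root + "/abort", false);
                List<String> acquired = zk.getChildren(root + "/acquired", false);
                System.out.println(root + ": aborted=" + aborted + ", acquired=" + acquired);
            }
        } catch (KeeperException e) {
            System.out.println("znode missing or ZooKeeper unavailable: " + e.getMessage());
        } finally {
            zk.close();
        }
    }
}
```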
2024-11-20T13:23:28,772 INFO [RS:0;5ef453f0fbb6:46739 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-20T13:23:28,777 INFO [RS:0;5ef453f0fbb6:46739 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ef453f0fbb6%2C46739%2C1732109006137, suffix=, logDir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/WALs/5ef453f0fbb6,46739,1732109006137, archiveDir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/oldWALs, maxLogs=32 2024-11-20T13:23:28,805 DEBUG [RS:0;5ef453f0fbb6:46739 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/WALs/5ef453f0fbb6,46739,1732109006137/5ef453f0fbb6%2C46739%2C1732109006137.1732109008780, exclude list is [], retry=0 2024-11-20T13:23:28,815 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34799,DS-d8cd8742-a148-4191-aafe-461b3c89aca5,DISK] 2024-11-20T13:23:28,825 INFO [RS:0;5ef453f0fbb6:46739 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/WALs/5ef453f0fbb6,46739,1732109006137/5ef453f0fbb6%2C46739%2C1732109006137.1732109008780 2024-11-20T13:23:28,826 DEBUG [RS:0;5ef453f0fbb6:46739 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46721:46721)] 2024-11-20T13:23:28,879 DEBUG [5ef453f0fbb6:43647 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T13:23:28,886 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:28,893 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ef453f0fbb6,46739,1732109006137, state=OPENING 2024-11-20T13:23:28,900 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T13:23:28,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T13:23:28,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T13:23:28,908 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T13:23:28,908 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T13:23:28,911 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=5ef453f0fbb6,46739,1732109006137}] 2024-11-20T13:23:29,090 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:29,092 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=AdminService, sasl=false 2024-11-20T13:23:29,097 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47794, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T13:23:29,108 INFO [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-20T13:23:29,108 INFO [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T13:23:29,108 INFO [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-20T13:23:29,112 INFO [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ef453f0fbb6%2C46739%2C1732109006137.meta, suffix=.meta, logDir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/WALs/5ef453f0fbb6,46739,1732109006137, archiveDir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/oldWALs, maxLogs=32 2024-11-20T13:23:29,129 DEBUG [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/WALs/5ef453f0fbb6,46739,1732109006137/5ef453f0fbb6%2C46739%2C1732109006137.meta.1732109009114.meta, exclude list is [], retry=0 2024-11-20T13:23:29,133 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34799,DS-d8cd8742-a148-4191-aafe-461b3c89aca5,DISK] 2024-11-20T13:23:29,137 INFO [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/WALs/5ef453f0fbb6,46739,1732109006137/5ef453f0fbb6%2C46739%2C1732109006137.meta.1732109009114.meta 2024-11-20T13:23:29,137 DEBUG [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46721:46721)] 2024-11-20T13:23:29,138 DEBUG [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T13:23:29,140 DEBUG [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T13:23:29,227 DEBUG [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T13:23:29,234 INFO [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
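The meta WAL created above is named from the logged prefix, a creation timestamp, and the ".meta" suffix, with commas in the server name percent-encoded (hence "%2C" in the path). A small JDK-only sketch that rebuilds the exact filename seen in the log from those pieces:

```java
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

// Rebuilds the meta WAL filename from the entries above using only the JDK:
// percent-encode the server name (commas become %2C), then join prefix,
// creation timestamp and suffix with dots. All values are taken from the log.
public class WalNameSketch {
    public static void main(String[] args) {
        String serverName = "5ef453f0fbb6,46739,1732109006137";
        String encoded = URLEncoder.encode(serverName, StandardCharsets.UTF_8);
        String prefix = encoded + ".meta";   // prefix=5ef453f0fbb6%2C46739%2C1732109006137.meta, as logged
        String suffix = ".meta";             // suffix=.meta, as logged
        long creationTs = 1732109009114L;    // timestamp embedded in the filename
        String walName = prefix + "." + creationTs + suffix;
        // prints 5ef453f0fbb6%2C46739%2C1732109006137.meta.1732109009114.meta
        System.out.println(walName);
    }
}
```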
2024-11-20T13:23:29,240 DEBUG [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T13:23:29,240 DEBUG [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:23:29,240 DEBUG [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-20T13:23:29,241 DEBUG [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-20T13:23:29,245 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T13:23:29,247 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T13:23:29,247 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:29,248 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T13:23:29,248 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T13:23:29,250 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T13:23:29,250 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:29,251 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T13:23:29,252 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T13:23:29,253 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T13:23:29,253 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:29,254 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T13:23:29,256 DEBUG [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740 2024-11-20T13:23:29,260 DEBUG [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740 2024-11-20T13:23:29,264 DEBUG [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
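The FlushLargeStoresPolicy message above means that, because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the hbase:meta descriptor, the per-family flush threshold falls back to the region memstore flush size divided by the number of families (16.0 M here). The fragment below is a minimal, hypothetical sketch of pinning that bound explicitly on a table descriptor; the table name and family are placeholders, and only the property key and the 16 MB value come from the log line above.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class FlushLowerBoundSketch {
  // "SomeTable" and the single 'info' family are placeholders for illustration.
  public static TableDescriptor withExplicitLowerBound() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("SomeTable"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        // Flush an individual family once its memstore exceeds 16 MB, instead of
        // relying on the memstore-flush-size / number-of-families fallback.
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound", "16777216")
        .build();
  }
}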
2024-11-20T13:23:29,267 DEBUG [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-20T13:23:29,269 INFO [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73105675, jitterRate=0.08935944736003876}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T13:23:29,272 DEBUG [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-20T13:23:29,280 INFO [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732109009084 2024-11-20T13:23:29,292 DEBUG [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T13:23:29,293 INFO [RS_OPEN_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-20T13:23:29,294 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:29,296 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ef453f0fbb6,46739,1732109006137, state=OPEN 2024-11-20T13:23:29,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T13:23:29,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T13:23:29,302 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T13:23:29,302 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T13:23:29,307 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T13:23:29,307 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=5ef453f0fbb6,46739,1732109006137 in 390 msec 2024-11-20T13:23:29,313 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T13:23:29,314 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 845 msec 2024-11-20T13:23:29,319 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.0960 sec 2024-11-20T13:23:29,319 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732109009319, 
completionTime=-1 2024-11-20T13:23:29,319 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T13:23:29,319 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-20T13:23:29,359 DEBUG [hconnection-0x47fac14f-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:23:29,362 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47800, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:23:29,375 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-20T13:23:29,375 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732109069375 2024-11-20T13:23:29,375 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732109129375 2024-11-20T13:23:29,375 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 55 msec 2024-11-20T13:23:29,398 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ef453f0fbb6,43647,1732109004684-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T13:23:29,399 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ef453f0fbb6,43647,1732109004684-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T13:23:29,399 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ef453f0fbb6,43647,1732109004684-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T13:23:29,400 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ef453f0fbb6:43647, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T13:23:29,401 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T13:23:29,407 DEBUG [master/5ef453f0fbb6:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-20T13:23:29,410 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-20T13:23:29,411 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T13:23:29,417 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-20T13:23:29,420 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T13:23:29,421 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:29,423 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T13:23:29,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741835_1011 (size=358) 2024-11-20T13:23:29,441 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 2b8ebee7bfda350373a6614eb33b4fd3, NAME => 'hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc 2024-11-20T13:23:29,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741836_1012 (size=42) 2024-11-20T13:23:29,852 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:23:29,852 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 2b8ebee7bfda350373a6614eb33b4fd3, disabling compactions & flushes 2024-11-20T13:23:29,852 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3. 2024-11-20T13:23:29,853 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3. 2024-11-20T13:23:29,853 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3. 
after waiting 0 ms 2024-11-20T13:23:29,853 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3. 2024-11-20T13:23:29,853 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3. 2024-11-20T13:23:29,853 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 2b8ebee7bfda350373a6614eb33b4fd3: 2024-11-20T13:23:29,856 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T13:23:29,865 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732109009858"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732109009858"}]},"ts":"1732109009858"} 2024-11-20T13:23:29,933 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T13:23:29,936 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T13:23:29,945 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109009937"}]},"ts":"1732109009937"} 2024-11-20T13:23:29,952 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-20T13:23:29,963 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=2b8ebee7bfda350373a6614eb33b4fd3, ASSIGN}] 2024-11-20T13:23:29,967 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=2b8ebee7bfda350373a6614eb33b4fd3, ASSIGN 2024-11-20T13:23:29,969 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=2b8ebee7bfda350373a6614eb33b4fd3, ASSIGN; state=OFFLINE, location=5ef453f0fbb6,46739,1732109006137; forceNewPlan=false, retain=false 2024-11-20T13:23:30,120 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=2b8ebee7bfda350373a6614eb33b4fd3, regionState=OPENING, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:30,126 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 2b8ebee7bfda350373a6614eb33b4fd3, server=5ef453f0fbb6,46739,1732109006137}] 2024-11-20T13:23:30,281 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:30,290 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3. 2024-11-20T13:23:30,290 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 2b8ebee7bfda350373a6614eb33b4fd3, NAME => 'hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3.', STARTKEY => '', ENDKEY => ''} 2024-11-20T13:23:30,291 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 2b8ebee7bfda350373a6614eb33b4fd3 2024-11-20T13:23:30,291 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:23:30,291 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 2b8ebee7bfda350373a6614eb33b4fd3 2024-11-20T13:23:30,291 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 2b8ebee7bfda350373a6614eb33b4fd3 2024-11-20T13:23:30,295 INFO [StoreOpener-2b8ebee7bfda350373a6614eb33b4fd3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 2b8ebee7bfda350373a6614eb33b4fd3 2024-11-20T13:23:30,298 INFO [StoreOpener-2b8ebee7bfda350373a6614eb33b4fd3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2b8ebee7bfda350373a6614eb33b4fd3 columnFamilyName info 2024-11-20T13:23:30,298 DEBUG [StoreOpener-2b8ebee7bfda350373a6614eb33b4fd3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:30,299 INFO [StoreOpener-2b8ebee7bfda350373a6614eb33b4fd3-1 {}] regionserver.HStore(327): Store=2b8ebee7bfda350373a6614eb33b4fd3/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:23:30,301 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/namespace/2b8ebee7bfda350373a6614eb33b4fd3 2024-11-20T13:23:30,302 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/namespace/2b8ebee7bfda350373a6614eb33b4fd3 2024-11-20T13:23:30,307 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 2b8ebee7bfda350373a6614eb33b4fd3 2024-11-20T13:23:30,312 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/namespace/2b8ebee7bfda350373a6614eb33b4fd3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T13:23:30,313 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 2b8ebee7bfda350373a6614eb33b4fd3; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71528846, jitterRate=0.06586286425590515}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T13:23:30,315 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 2b8ebee7bfda350373a6614eb33b4fd3: 2024-11-20T13:23:30,318 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3., pid=6, masterSystemTime=1732109010281 2024-11-20T13:23:30,323 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3. 2024-11-20T13:23:30,323 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3. 
2024-11-20T13:23:30,324 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=2b8ebee7bfda350373a6614eb33b4fd3, regionState=OPEN, openSeqNum=2, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:30,342 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T13:23:30,344 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 2b8ebee7bfda350373a6614eb33b4fd3, server=5ef453f0fbb6,46739,1732109006137 in 210 msec 2024-11-20T13:23:30,347 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T13:23:30,347 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=2b8ebee7bfda350373a6614eb33b4fd3, ASSIGN in 379 msec 2024-11-20T13:23:30,349 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T13:23:30,350 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109010349"}]},"ts":"1732109010349"} 2024-11-20T13:23:30,353 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-20T13:23:30,358 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T13:23:30,362 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 946 msec 2024-11-20T13:23:30,422 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-20T13:23:30,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T13:23:30,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-20T13:23:30,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T13:23:30,471 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-20T13:23:30,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-20T13:23:30,499 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 32 msec 2024-11-20T13:23:30,506 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-20T13:23:30,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-20T13:23:30,525 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 18 msec 2024-11-20T13:23:30,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-20T13:23:30,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-20T13:23:30,537 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.294sec 2024-11-20T13:23:30,539 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T13:23:30,541 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T13:23:30,542 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T13:23:30,542 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T13:23:30,543 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T13:23:30,544 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ef453f0fbb6,43647,1732109004684-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T13:23:30,544 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ef453f0fbb6,43647,1732109004684-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T13:23:30,552 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-20T13:23:30,553 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T13:23:30,553 INFO [master/5ef453f0fbb6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ef453f0fbb6,43647,1732109004684-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-20T13:23:30,625 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x05cd5d49 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@13f2f5b 2024-11-20T13:23:30,626 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-20T13:23:30,635 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1674876e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:23:30,639 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-20T13:23:30,639 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-20T13:23:30,651 DEBUG [hconnection-0x49a7275d-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:23:30,662 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47804, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:23:30,674 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=5ef453f0fbb6,43647,1732109004684 2024-11-20T13:23:30,694 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=218, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=993, ProcessCount=11, AvailableMemoryMB=2412 2024-11-20T13:23:30,709 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T13:23:30,712 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48574, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T13:23:30,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
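The client.ZKConnectionRegistry WARN above flags the ZooKeeper-based connection registry as deprecated in favour of the RPC-based registry described at the linked book section. Below is a minimal client-side sketch of opting in; it assumes the hbase.client.registry.impl and hbase.client.bootstrap.servers property names from the 2.5+ documentation, and the bootstrap address is a placeholder rather than anything taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class RegistrySketch {
  public static Connection connectWithoutZk() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Bootstrap cluster metadata over RPC instead of reading it from ZooKeeper.
    conf.set("hbase.client.registry.impl",
        "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
    // Placeholder endpoint; point this at real master/regionserver RPC addresses.
    conf.set("hbase.client.bootstrap.servers", "master-host:16000");
    return ConnectionFactory.createConnection(conf);
  }
}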
2024-11-20T13:23:30,726 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T13:23:30,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T13:23:30,731 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T13:23:30,731 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:30,732 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-20T13:23:30,734 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T13:23:30,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T13:23:30,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741837_1013 (size=963) 2024-11-20T13:23:30,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T13:23:31,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T13:23:31,149 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc 2024-11-20T13:23:31,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741838_1014 (size=53) 2024-11-20T13:23:31,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T13:23:31,563 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:23:31,563 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing cbbdc72320da06253b5398d0c51c77ae, disabling compactions & flushes 2024-11-20T13:23:31,563 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:31,564 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:31,564 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. after waiting 0 ms 2024-11-20T13:23:31,564 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:31,564 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:31,564 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:31,567 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T13:23:31,567 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732109011567"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732109011567"}]},"ts":"1732109011567"} 2024-11-20T13:23:31,571 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T13:23:31,573 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T13:23:31,574 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109011573"}]},"ts":"1732109011573"} 2024-11-20T13:23:31,576 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T13:23:31,582 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cbbdc72320da06253b5398d0c51c77ae, ASSIGN}] 2024-11-20T13:23:31,584 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cbbdc72320da06253b5398d0c51c77ae, ASSIGN 2024-11-20T13:23:31,586 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=cbbdc72320da06253b5398d0c51c77ae, ASSIGN; state=OFFLINE, location=5ef453f0fbb6,46739,1732109006137; forceNewPlan=false, retain=false 2024-11-20T13:23:31,737 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=cbbdc72320da06253b5398d0c51c77ae, regionState=OPENING, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:31,741 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137}] 2024-11-20T13:23:31,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T13:23:31,896 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:31,903 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:31,903 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} 2024-11-20T13:23:31,904 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:31,904 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:23:31,904 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:31,904 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:31,906 INFO [StoreOpener-cbbdc72320da06253b5398d0c51c77ae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:31,910 INFO [StoreOpener-cbbdc72320da06253b5398d0c51c77ae-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:23:31,911 INFO [StoreOpener-cbbdc72320da06253b5398d0c51c77ae-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cbbdc72320da06253b5398d0c51c77ae columnFamilyName A 2024-11-20T13:23:31,911 DEBUG [StoreOpener-cbbdc72320da06253b5398d0c51c77ae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:31,913 INFO [StoreOpener-cbbdc72320da06253b5398d0c51c77ae-1 {}] regionserver.HStore(327): Store=cbbdc72320da06253b5398d0c51c77ae/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:23:31,913 INFO [StoreOpener-cbbdc72320da06253b5398d0c51c77ae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:31,915 INFO [StoreOpener-cbbdc72320da06253b5398d0c51c77ae-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:23:31,916 INFO [StoreOpener-cbbdc72320da06253b5398d0c51c77ae-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cbbdc72320da06253b5398d0c51c77ae columnFamilyName B 2024-11-20T13:23:31,916 DEBUG [StoreOpener-cbbdc72320da06253b5398d0c51c77ae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:31,917 INFO [StoreOpener-cbbdc72320da06253b5398d0c51c77ae-1 {}] regionserver.HStore(327): Store=cbbdc72320da06253b5398d0c51c77ae/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:23:31,917 INFO [StoreOpener-cbbdc72320da06253b5398d0c51c77ae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:31,920 INFO [StoreOpener-cbbdc72320da06253b5398d0c51c77ae-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:23:31,921 INFO [StoreOpener-cbbdc72320da06253b5398d0c51c77ae-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cbbdc72320da06253b5398d0c51c77ae columnFamilyName C 2024-11-20T13:23:31,921 DEBUG [StoreOpener-cbbdc72320da06253b5398d0c51c77ae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:31,922 INFO [StoreOpener-cbbdc72320da06253b5398d0c51c77ae-1 {}] regionserver.HStore(327): Store=cbbdc72320da06253b5398d0c51c77ae/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:23:31,922 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:31,924 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:31,925 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:31,928 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T13:23:31,931 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:31,935 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T13:23:31,935 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened cbbdc72320da06253b5398d0c51c77ae; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64446092, jitterRate=-0.03967839479446411}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T13:23:31,936 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:31,938 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., pid=11, masterSystemTime=1732109011895 2024-11-20T13:23:31,941 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:31,941 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
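The CreateTableProcedure traced above defines 'TestAcidGuarantees' with column families A, B and C (VERSIONS => '1', BLOCKSIZE => 65536) plus the table attribute hbase.hregion.compacting.memstore.type => 'ADAPTIVE', which is why each store reports memstore type=CompactingMemStore with compactor=ADAPTIVE. The test's own table-creation helper is not part of this log; the fragment below is only a hypothetical Admin-API equivalent of that descriptor, and the trailing flush mirrors the flush request that appears later in the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class AcidTableSketch {
  public static void createAcidTable(Connection conn) throws Exception {
    TableDescriptorBuilder table =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table attribute recorded in the descriptor logged above.
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String family : new String[] {"A", "B", "C"}) {
      table.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)        // VERSIONS => '1'
              .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536 B (64KB)'
              .build());
    }
    try (Admin admin = conn.getAdmin()) {
      admin.createTable(table.build());
      // A table flush like the one requested near the end of this section.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}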
2024-11-20T13:23:31,942 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=cbbdc72320da06253b5398d0c51c77ae, regionState=OPEN, openSeqNum=2, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:31,949 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-20T13:23:31,949 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 in 205 msec 2024-11-20T13:23:31,953 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-20T13:23:31,953 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=cbbdc72320da06253b5398d0c51c77ae, ASSIGN in 367 msec 2024-11-20T13:23:31,955 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T13:23:31,955 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109011955"}]},"ts":"1732109011955"} 2024-11-20T13:23:31,958 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T13:23:31,963 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T13:23:31,966 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2370 sec 2024-11-20T13:23:32,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T13:23:32,858 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-20T13:23:32,865 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6869c97c to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@69fde1e2 2024-11-20T13:23:32,870 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43c0ded7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:23:32,873 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:23:32,875 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40170, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:23:32,879 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T13:23:32,881 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49080, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T13:23:32,891 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3e527f0a to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@16e8e017 2024-11-20T13:23:32,897 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16b22ba9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:23:32,899 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x71111e88 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2028a0e5 2024-11-20T13:23:32,903 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36a3e1b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:23:32,904 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75ceb322 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4058e5e5 2024-11-20T13:23:32,909 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69df121b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:23:32,910 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x70f8b481 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@778810b6 2024-11-20T13:23:32,916 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59ea5382, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:23:32,917 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c8859ce to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4d8b7aa6 2024-11-20T13:23:32,922 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@467656d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:23:32,925 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x080f3e14 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e7389de 2024-11-20T13:23:32,929 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e6736ba, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:23:32,931 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2aea0556 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@384544ac 2024-11-20T13:23:32,936 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f7067f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:23:32,938 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4f59a475 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@13ca55c1 2024-11-20T13:23:32,942 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38a24623, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:23:32,944 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38296ae7 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@790c447e 2024-11-20T13:23:32,949 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53359622, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:23:32,962 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:23:32,966 DEBUG [hconnection-0x4d0fdb91-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:23:32,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-20T13:23:32,971 DEBUG [hconnection-0x2a56c554-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:23:32,971 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:23:32,971 DEBUG [hconnection-0x10a9a0d7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:23:32,972 DEBUG [hconnection-0x4766e894-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:23:32,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T13:23:32,973 DEBUG 
[hconnection-0x642ace61-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:23:32,973 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:23:32,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:23:32,975 DEBUG [hconnection-0x74f48c42-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:23:32,975 DEBUG [hconnection-0x4c038b40-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:23:32,978 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40172, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:23:32,985 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40174, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:23:32,989 DEBUG [hconnection-0x1b171e57-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:23:32,994 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40188, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:23:32,996 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40216, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:23:32,997 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40204, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:23:33,001 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40226, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:23:33,002 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40236, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:23:33,006 DEBUG [hconnection-0x5790ecab-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:23:33,041 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40248, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:23:33,070 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40252, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:23:33,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T13:23:33,122 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T13:23:33,122 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:33,139 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:33,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:33,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:33,140 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T13:23:33,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:33,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:33,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:33,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:33,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:33,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:33,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:33,197 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:33,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:33,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:33,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T13:23:33,375 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/be746f7c9fdb466d84fb2592a151cbf5 is 50, key is test_row_0/A:col10/1732109013119/Put/seqid=0 2024-11-20T13:23:33,400 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:33,402 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T13:23:33,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:33,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:33,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:33,431 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:33,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:33,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:33,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741839_1015 (size=12001) 2024-11-20T13:23:33,454 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/be746f7c9fdb466d84fb2592a151cbf5 2024-11-20T13:23:33,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:33,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109073424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:33,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:33,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109073438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:33,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:33,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109073450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:33,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:33,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109073462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:33,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:33,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109073479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:33,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:33,593 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:33,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T13:23:33,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:33,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109073589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:33,595 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T13:23:33,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:33,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109073587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:33,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:33,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:33,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:33,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109073594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:33,602 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:33,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:33,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:33,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:33,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109073594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:33,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:33,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109073590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:33,673 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/b96e0ff69c974e578f863b0fa6ca68be is 50, key is test_row_0/B:col10/1732109013119/Put/seqid=0 2024-11-20T13:23:33,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741840_1016 (size=12001) 2024-11-20T13:23:33,759 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:33,760 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T13:23:33,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:33,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:33,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:33,761 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:33,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:33,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:33,767 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/b96e0ff69c974e578f863b0fa6ca68be 2024-11-20T13:23:33,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:33,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109073799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:33,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:33,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109073814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:33,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:33,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109073812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:33,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:33,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109073814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:33,833 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:33,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109073832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:33,870 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/db39da6ddaf34b9392e58b8c1dbe9d3a is 50, key is test_row_0/C:col10/1732109013119/Put/seqid=0 2024-11-20T13:23:33,916 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:33,917 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T13:23:33,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:33,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:33,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741841_1017 (size=12001) 2024-11-20T13:23:33,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:33,918 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:33,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:33,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:33,925 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/db39da6ddaf34b9392e58b8c1dbe9d3a 2024-11-20T13:23:33,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/be746f7c9fdb466d84fb2592a151cbf5 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/be746f7c9fdb466d84fb2592a151cbf5 2024-11-20T13:23:33,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/be746f7c9fdb466d84fb2592a151cbf5, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T13:23:33,977 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/b96e0ff69c974e578f863b0fa6ca68be as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/b96e0ff69c974e578f863b0fa6ca68be 2024-11-20T13:23:33,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/b96e0ff69c974e578f863b0fa6ca68be, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T13:23:34,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/db39da6ddaf34b9392e58b8c1dbe9d3a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/db39da6ddaf34b9392e58b8c1dbe9d3a 2024-11-20T13:23:34,032 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/db39da6ddaf34b9392e58b8c1dbe9d3a, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T13:23:34,036 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush 
of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for cbbdc72320da06253b5398d0c51c77ae in 914ms, sequenceid=13, compaction requested=false 2024-11-20T13:23:34,037 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T13:23:34,041 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:34,073 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:34,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T13:23:34,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:34,075 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T13:23:34,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:34,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:34,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:34,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:34,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:34,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:34,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T13:23:34,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/3fcd3ff9ac0a4231a18d02c8eb8dd2b3 is 50, key is test_row_0/A:col10/1732109013403/Put/seqid=0 2024-11-20T13:23:34,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:34,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:34,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:34,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109074155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:34,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741842_1018 (size=12001) 2024-11-20T13:23:34,184 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/3fcd3ff9ac0a4231a18d02c8eb8dd2b3 2024-11-20T13:23:34,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:34,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109074158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:34,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:34,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109074164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:34,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:34,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109074165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:34,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:34,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109074173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:34,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/4154fc76c16c4943a8f7cfd980028568 is 50, key is test_row_0/B:col10/1732109013403/Put/seqid=0 2024-11-20T13:23:34,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:34,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109074277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:34,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741843_1019 (size=12001) 2024-11-20T13:23:34,302 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/4154fc76c16c4943a8f7cfd980028568 2024-11-20T13:23:34,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:34,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109074309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:34,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:34,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109074310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:34,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:34,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109074310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:34,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:34,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109074313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:34,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/922e9546d60d479b84139d8b5136e065 is 50, key is test_row_0/C:col10/1732109013403/Put/seqid=0 2024-11-20T13:23:34,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741844_1020 (size=12001) 2024-11-20T13:23:34,425 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/922e9546d60d479b84139d8b5136e065 2024-11-20T13:23:34,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/3fcd3ff9ac0a4231a18d02c8eb8dd2b3 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/3fcd3ff9ac0a4231a18d02c8eb8dd2b3 2024-11-20T13:23:34,456 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/3fcd3ff9ac0a4231a18d02c8eb8dd2b3, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T13:23:34,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/4154fc76c16c4943a8f7cfd980028568 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/4154fc76c16c4943a8f7cfd980028568 2024-11-20T13:23:34,473 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 
{event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/4154fc76c16c4943a8f7cfd980028568, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T13:23:34,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/922e9546d60d479b84139d8b5136e065 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/922e9546d60d479b84139d8b5136e065 2024-11-20T13:23:34,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:34,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109074486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:34,492 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/922e9546d60d479b84139d8b5136e065, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T13:23:34,497 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for cbbdc72320da06253b5398d0c51c77ae in 423ms, sequenceid=37, compaction requested=false 2024-11-20T13:23:34,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:34,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
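The RegionTooBusyException stacks above are HRegion.checkResources refusing new mutations while the region's memstore sits above its blocking limit (512.0 K in this run); each rejected Mutate is handed back to the client, which retries it until the flush recorded here drains the memstore. The blocking limit is hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, so the 512 K figure suggests this test runs with a far smaller flush size than the 128 MB default. The sketch below is not part of the test log: the table, family, qualifier, and retry values are illustrative assumptions, and it only shows how a writer can lean on the client's built-in retry of RegionTooBusyException instead of treating it as fatal.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative retry settings; the client already retries RegionTooBusyException by default.
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 100);              // ms to wait between retries
    conf.setLong("hbase.client.operation.timeout", 60_000);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/family/qualifier mirror the shapes seen in the log (test_row_0, family A, col10).
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        table.put(put);                                   // retried internally while the region is busy
      } catch (RegionTooBusyException e) {
        // Only reached once the retry budget is exhausted; back off and try again later.
        System.err.println("Region still over its memstore limit: " + e.getMessage());
      }
    }
  }
}

A burst of RegionTooBusyException like the one logged above is expected back-pressure while the flush catches up, not data loss; the rejected puts are simply re-issued.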
2024-11-20T13:23:34,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-20T13:23:34,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-20T13:23:34,504 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-20T13:23:34,504 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5250 sec 2024-11-20T13:23:34,508 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.5410 sec 2024-11-20T13:23:34,562 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T13:23:34,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:34,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:34,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:34,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:34,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:34,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:34,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:34,579 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/9810d3db726c417eb2b4e19df61ee600 is 50, key is test_row_0/A:col10/1732109014160/Put/seqid=0 2024-11-20T13:23:34,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741845_1021 (size=14341) 2024-11-20T13:23:34,620 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/9810d3db726c417eb2b4e19df61ee600 2024-11-20T13:23:34,651 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/7278a8b83b8345a58f5a1bb942ca510e is 50, key is test_row_0/B:col10/1732109014160/Put/seqid=0 2024-11-20T13:23:34,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741846_1022 
(size=12001) 2024-11-20T13:23:34,693 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/7278a8b83b8345a58f5a1bb942ca510e 2024-11-20T13:23:34,715 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/b091cc6dbf3f4945b353c2a11dcd16f5 is 50, key is test_row_0/C:col10/1732109014160/Put/seqid=0 2024-11-20T13:23:34,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741847_1023 (size=12001) 2024-11-20T13:23:34,728 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/b091cc6dbf3f4945b353c2a11dcd16f5 2024-11-20T13:23:34,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/9810d3db726c417eb2b4e19df61ee600 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9810d3db726c417eb2b4e19df61ee600 2024-11-20T13:23:34,753 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9810d3db726c417eb2b4e19df61ee600, entries=200, sequenceid=51, filesize=14.0 K 2024-11-20T13:23:34,755 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T13:23:34,763 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/7278a8b83b8345a58f5a1bb942ca510e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/7278a8b83b8345a58f5a1bb942ca510e 2024-11-20T13:23:34,785 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/7278a8b83b8345a58f5a1bb942ca510e, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T13:23:34,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/b091cc6dbf3f4945b353c2a11dcd16f5 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/b091cc6dbf3f4945b353c2a11dcd16f5 2024-11-20T13:23:34,803 INFO 
[MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/b091cc6dbf3f4945b353c2a11dcd16f5, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T13:23:34,818 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=120.76 KB/123660 for cbbdc72320da06253b5398d0c51c77ae in 257ms, sequenceid=51, compaction requested=true 2024-11-20T13:23:34,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:34,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:34,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:23:34,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:34,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:23:34,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:34,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:23:34,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:34,831 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:34,832 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T13:23:34,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:34,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:34,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:34,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:34,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:34,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:34,837 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): 
Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:34,837 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:34,838 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/B is initiating minor compaction (all files) 2024-11-20T13:23:34,839 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/B in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:34,839 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/b96e0ff69c974e578f863b0fa6ca68be, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/4154fc76c16c4943a8f7cfd980028568, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/7278a8b83b8345a58f5a1bb942ca510e] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=35.2 K 2024-11-20T13:23:34,840 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:34,840 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/A is initiating minor compaction (all files) 2024-11-20T13:23:34,840 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/A in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
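At this point each store has accumulated three HFiles (sequenceids 13, 37 and 51), so the flusher queues A, B and C for compaction and ExploringCompactionPolicy folds all eligible files of a store into one minor compaction; the "3 eligible, 16 blocking" figures reflect the store-file thresholds the policy consults. The snippet below is illustrative only, not taken from the test: it names the usual configuration knobs behind that decision (the values shown are the stock defaults, and this excerpt does not reveal whether the test overrides them) and the Admin call that would queue the same kind of compaction by hand.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionKnobs {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Selection thresholds consulted by the compaction policy (stock defaults shown).
    conf.setInt("hbase.hstore.compactionThreshold", 3);   // min store files before a minor compaction is considered
    conf.setInt("hbase.hstore.compaction.max", 10);       // max files folded into one minor compaction
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" figure in the log

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Ask the regionservers to compact store 'B' of every region of the table;
      // the request is asynchronous, like the queued compaction seen above.
      admin.compact(table, Bytes.toBytes("B"));
    }
  }
}

admin.majorCompact(table) would instead force every store's files to be rewritten regardless of the selection policy; the log above shows the cheaper minor path chosen automatically.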
2024-11-20T13:23:34,840 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting b96e0ff69c974e578f863b0fa6ca68be, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732109013046 2024-11-20T13:23:34,840 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/be746f7c9fdb466d84fb2592a151cbf5, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/3fcd3ff9ac0a4231a18d02c8eb8dd2b3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9810d3db726c417eb2b4e19df61ee600] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=37.4 K 2024-11-20T13:23:34,842 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 4154fc76c16c4943a8f7cfd980028568, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732109013403 2024-11-20T13:23:34,842 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting be746f7c9fdb466d84fb2592a151cbf5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732109013046 2024-11-20T13:23:34,843 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 7278a8b83b8345a58f5a1bb942ca510e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732109014158 2024-11-20T13:23:34,844 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3fcd3ff9ac0a4231a18d02c8eb8dd2b3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732109013403 2024-11-20T13:23:34,845 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9810d3db726c417eb2b4e19df61ee600, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732109014158 2024-11-20T13:23:34,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/ccb4bd314ade43249daaadd5407c7537 is 50, key is test_row_0/A:col10/1732109014830/Put/seqid=0 2024-11-20T13:23:34,876 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T13:23:34,884 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-20T13:23:34,887 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#B#compaction#10 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:34,888 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/c93696e6180941aab637c9310fe6d843 is 50, key is test_row_0/B:col10/1732109014160/Put/seqid=0 2024-11-20T13:23:34,895 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#A#compaction#11 average throughput is 0.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:34,896 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/9bde4a4abccb4202962fe8af8c8c9918 is 50, key is test_row_0/A:col10/1732109014160/Put/seqid=0 2024-11-20T13:23:34,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741848_1024 (size=23705) 2024-11-20T13:23:34,900 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/ccb4bd314ade43249daaadd5407c7537 2024-11-20T13:23:34,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:34,918 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:34,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741849_1025 (size=12104) 2024-11-20T13:23:34,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109074867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:34,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109074863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:34,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:34,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109074867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:34,939 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/c93696e6180941aab637c9310fe6d843 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c93696e6180941aab637c9310fe6d843 2024-11-20T13:23:34,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:34,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109074910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:34,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741850_1026 (size=12104) 2024-11-20T13:23:34,963 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/8b91a90232d342d5a8d457e2e1fe9e4e is 50, key is test_row_0/B:col10/1732109014830/Put/seqid=0 2024-11-20T13:23:34,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:34,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109074907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:34,969 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/B of cbbdc72320da06253b5398d0c51c77ae into c93696e6180941aab637c9310fe6d843(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:23:34,969 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:34,969 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/B, priority=13, startTime=1732109014831; duration=0sec 2024-11-20T13:23:34,970 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:34,970 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:B 2024-11-20T13:23:34,970 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:34,979 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/9bde4a4abccb4202962fe8af8c8c9918 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9bde4a4abccb4202962fe8af8c8c9918 2024-11-20T13:23:34,981 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:34,981 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/C is initiating minor compaction (all files) 2024-11-20T13:23:34,981 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/C in 
TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:34,982 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/db39da6ddaf34b9392e58b8c1dbe9d3a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/922e9546d60d479b84139d8b5136e065, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/b091cc6dbf3f4945b353c2a11dcd16f5] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=35.2 K 2024-11-20T13:23:34,986 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting db39da6ddaf34b9392e58b8c1dbe9d3a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732109013046 2024-11-20T13:23:34,989 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 922e9546d60d479b84139d8b5136e065, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732109013403 2024-11-20T13:23:34,994 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting b091cc6dbf3f4945b353c2a11dcd16f5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732109014158 2024-11-20T13:23:34,999 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/A of cbbdc72320da06253b5398d0c51c77ae into 9bde4a4abccb4202962fe8af8c8c9918(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
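The selection messages ("3 eligible, 16 blocking") and the minor compactions completing above are driven by store-file thresholds. A minimal sketch of setting the commonly used knobs on a client Configuration follows; the property names reflect standard HBase settings as I understand them, so verify them against the HBase version actually in use.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: knobs that govern when minor compactions are selected and when
// writes start blocking on too many store files. Names assumed, not taken
// from this test's configuration.
public class CompactionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Minimum / maximum number of store files considered per minor compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);

        // Ratio used by the selection policy (see "with 1 in ratio" above).
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);

        // Store-file count at which flushes/writes block -- the "16 blocking"
        // reported by SortedCompactionPolicy in this log.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);

        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
    }
}
```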
2024-11-20T13:23:34,999 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:34,999 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/A, priority=13, startTime=1732109014820; duration=0sec 2024-11-20T13:23:34,999 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:34,999 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:A 2024-11-20T13:23:35,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741851_1027 (size=12001) 2024-11-20T13:23:35,018 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/8b91a90232d342d5a8d457e2e1fe9e4e 2024-11-20T13:23:35,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109075023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,047 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/bbeb7d315d5949578f3fca61367fde4c is 50, key is test_row_0/C:col10/1732109014830/Put/seqid=0 2024-11-20T13:23:35,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109075049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109075049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,067 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#C#compaction#14 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:35,069 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/dc63bf2ba86b492c87110e141ee6ceb9 is 50, key is test_row_0/C:col10/1732109014160/Put/seqid=0 2024-11-20T13:23:35,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109075061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109075068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T13:23:35,098 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-20T13:23:35,103 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:23:35,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-20T13:23:35,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T13:23:35,109 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:23:35,111 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:23:35,111 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:23:35,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741852_1028 (size=12001) 2024-11-20T13:23:35,121 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/bbeb7d315d5949578f3fca61367fde4c 2024-11-20T13:23:35,132 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/ccb4bd314ade43249daaadd5407c7537 as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/ccb4bd314ade43249daaadd5407c7537 2024-11-20T13:23:35,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/ccb4bd314ade43249daaadd5407c7537, entries=400, sequenceid=77, filesize=23.1 K 2024-11-20T13:23:35,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/8b91a90232d342d5a8d457e2e1fe9e4e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/8b91a90232d342d5a8d457e2e1fe9e4e 2024-11-20T13:23:35,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741853_1029 (size=12104) 2024-11-20T13:23:35,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/8b91a90232d342d5a8d457e2e1fe9e4e, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T13:23:35,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/bbeb7d315d5949578f3fca61367fde4c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/bbeb7d315d5949578f3fca61367fde4c 2024-11-20T13:23:35,170 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/dc63bf2ba86b492c87110e141ee6ceb9 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/dc63bf2ba86b492c87110e141ee6ceb9 2024-11-20T13:23:35,177 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/bbeb7d315d5949578f3fca61367fde4c, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T13:23:35,180 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for cbbdc72320da06253b5398d0c51c77ae in 349ms, sequenceid=77, compaction requested=false 2024-11-20T13:23:35,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:35,182 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/C of cbbdc72320da06253b5398d0c51c77ae into dc63bf2ba86b492c87110e141ee6ceb9(size=11.8 K), total size for store is 23.5 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:23:35,183 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:35,183 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/C, priority=13, startTime=1732109014831; duration=0sec 2024-11-20T13:23:35,183 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:35,183 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:C 2024-11-20T13:23:35,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T13:23:35,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:35,241 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T13:23:35,241 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:35,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:35,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:35,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:35,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:35,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:35,256 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/174c1e5841d8460f955af7ced5954923 is 50, key is test_row_0/A:col10/1732109015237/Put/seqid=0 2024-11-20T13:23:35,270 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T13:23:35,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
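The repeated "Over memstore limit=512.0 K" rejections and the back-to-back flushes above come from the region's blocking memstore size, which is roughly the per-region flush size multiplied by the block multiplier. The sketch below makes that arithmetic explicit; the 128 KB flush size is only an assumption chosen so that 128 KB x 4 matches the 512 K limit seen in this log, since the test harness's actual settings are not shown here.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the memstore sizing behind the RegionTooBusyException warnings:
// once a region's memstore exceeds about flush.size * block.multiplier,
// puts are rejected until the flush catches up. Values below are assumptions.
public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        long flushSize = 128 * 1024;   // assumed per-region memstore flush size
        int blockMultiplier = 4;       // assumed multiplier

        conf.setLong("hbase.hregion.memstore.flush.size", flushSize);
        conf.setInt("hbase.hregion.memstore.block.multiplier", blockMultiplier);

        long blockingLimit = flushSize * blockMultiplier;
        System.out.println("writes block above ~" + (blockingLimit / 1024) + " K per region");
    }
}
```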
2024-11-20T13:23:35,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing
2024-11-20T13:23:35,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.
2024-11-20T13:23:35,277 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15
java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:23:35,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15
java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
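The failing FlushRegionCallable (pid=15) above exists because a client asked the master to flush the table, producing the FlushTableProcedure (pid=14) logged by HMaster ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"). A minimal sketch of issuing that same request from client code is below; connection details are placeholders, while Admin.flush itself is the standard API.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the client call behind a FlushTableProcedure like pid=14 above.
public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Asks the master to flush every region of the table; on the region
            // server this becomes a FlushRegionCallable (pid=15 in this log),
            // which the master retries if the region is "already flushing".
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```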
2024-11-20T13:23:35,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
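The RegionTooBusyException records surrounding this span are what the put callers see while the memstore is over its limit. The sketch below shows one way a writer could back off and retry; it is my own illustration, not the test's code, and in practice the HBase client already retries such calls internally based on its own retry settings.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of a writer that backs off when the region answers with
// RegionTooBusyException ("Over memstore limit" above). Retry counts and
// sleep times are arbitrary illustration values.
public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break;  // write accepted
                } catch (RegionTooBusyException busy) {
                    // Memstore above its blocking limit; give the flush time to drain it.
                    Thread.sleep(100L * attempt);
                }
            }
            // After five failed attempts the sketch simply gives up.
        }
    }
}
```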
2024-11-20T13:23:35,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741854_1030 (size=11997) 2024-11-20T13:23:35,297 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/174c1e5841d8460f955af7ced5954923 2024-11-20T13:23:35,330 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/7c0973046e9d4ed19ecc2b74446a3bcd is 50, key is test_row_0/B:col10/1732109015237/Put/seqid=0 2024-11-20T13:23:35,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741855_1031 (size=9657) 2024-11-20T13:23:35,397 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/7c0973046e9d4ed19ecc2b74446a3bcd 2024-11-20T13:23:35,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T13:23:35,420 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/604e2c6ac417422bb304623116e36588 is 50, key is test_row_0/C:col10/1732109015237/Put/seqid=0 2024-11-20T13:23:35,432 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,435 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T13:23:35,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:35,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:35,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:35,435 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:35,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:35,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:35,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741856_1032 (size=9657) 2024-11-20T13:23:35,465 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/604e2c6ac417422bb304623116e36588 2024-11-20T13:23:35,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/174c1e5841d8460f955af7ced5954923 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/174c1e5841d8460f955af7ced5954923 2024-11-20T13:23:35,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
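The files being flushed and committed below all carry the same row keys under families A, B and C (test_row_0/A:col10, .../B:col10, .../C:col10), which reflects the TestAcidGuarantees write pattern: each writer mutates all three families of a row in a single Put so the row stays internally consistent. A minimal sketch of that pattern follows; the qualifier and value are placeholders, since the real test writers generate their own payloads.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of one Put touching column families A, B and C for the same row, so a
// single row mutation is applied atomically across all three stores.
public class MultiFamilyPutSketch {
    public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            byte[] value = Bytes.toBytes(System.currentTimeMillis());
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
            put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
            put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
            table.put(put);  // all three families go through the same row lock / WAL append
        }
    }
}
```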
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109075467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,492 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/174c1e5841d8460f955af7ced5954923, entries=150, sequenceid=91, filesize=11.7 K 2024-11-20T13:23:35,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/7c0973046e9d4ed19ecc2b74446a3bcd as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/7c0973046e9d4ed19ecc2b74446a3bcd 2024-11-20T13:23:35,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109075459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109075477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,501 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109075477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109075488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,505 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/7c0973046e9d4ed19ecc2b74446a3bcd, entries=100, sequenceid=91, filesize=9.4 K 2024-11-20T13:23:35,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/604e2c6ac417422bb304623116e36588 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/604e2c6ac417422bb304623116e36588 2024-11-20T13:23:35,519 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/604e2c6ac417422bb304623116e36588, entries=100, sequenceid=91, filesize=9.4 K 2024-11-20T13:23:35,521 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for cbbdc72320da06253b5398d0c51c77ae in 280ms, sequenceid=91, compaction requested=true 2024-11-20T13:23:35,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:35,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:23:35,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:35,522 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:35,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:23:35,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), 
splitQueue=0 2024-11-20T13:23:35,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:23:35,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:35,522 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:35,525 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 47806 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:35,526 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/A is initiating minor compaction (all files) 2024-11-20T13:23:35,526 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/A in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:35,526 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9bde4a4abccb4202962fe8af8c8c9918, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/ccb4bd314ade43249daaadd5407c7537, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/174c1e5841d8460f955af7ced5954923] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=46.7 K 2024-11-20T13:23:35,526 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:35,526 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/B is initiating minor compaction (all files) 2024-11-20T13:23:35,527 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/B in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:35,527 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c93696e6180941aab637c9310fe6d843, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/8b91a90232d342d5a8d457e2e1fe9e4e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/7c0973046e9d4ed19ecc2b74446a3bcd] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=33.0 K 2024-11-20T13:23:35,528 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9bde4a4abccb4202962fe8af8c8c9918, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732109014158 2024-11-20T13:23:35,529 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting ccb4bd314ade43249daaadd5407c7537, keycount=400, bloomtype=ROW, size=23.1 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732109014687 2024-11-20T13:23:35,530 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 174c1e5841d8460f955af7ced5954923, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732109014860 2024-11-20T13:23:35,532 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting c93696e6180941aab637c9310fe6d843, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732109014158 2024-11-20T13:23:35,533 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b91a90232d342d5a8d457e2e1fe9e4e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732109014823 2024-11-20T13:23:35,534 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c0973046e9d4ed19ecc2b74446a3bcd, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732109014899 2024-11-20T13:23:35,577 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#A#compaction#18 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:35,579 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/63c2326d023d463cb2ed28e6794f24ca is 50, key is test_row_0/A:col10/1732109015237/Put/seqid=0 2024-11-20T13:23:35,588 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#B#compaction#19 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:35,589 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/d0d00a13478049ff8f131182c009b2f9 is 50, key is test_row_0/B:col10/1732109015237/Put/seqid=0 2024-11-20T13:23:35,593 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,594 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T13:23:35,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:35,594 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T13:23:35,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:35,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:35,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:35,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:35,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:35,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:35,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:35,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:35,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109075625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109075626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109075626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109075635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,645 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109075644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741858_1034 (size=12207) 2024-11-20T13:23:35,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741857_1033 (size=12207) 2024-11-20T13:23:35,679 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/d0d00a13478049ff8f131182c009b2f9 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d0d00a13478049ff8f131182c009b2f9 2024-11-20T13:23:35,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/b688ab5b865745da97bf42a25fa6adef is 50, key is test_row_0/A:col10/1732109015407/Put/seqid=0 2024-11-20T13:23:35,704 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/B of cbbdc72320da06253b5398d0c51c77ae into d0d00a13478049ff8f131182c009b2f9(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:23:35,704 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:35,704 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/B, priority=13, startTime=1732109015522; duration=0sec 2024-11-20T13:23:35,705 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:35,705 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:B 2024-11-20T13:23:35,705 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:35,708 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:35,708 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/C is initiating minor compaction (all files) 2024-11-20T13:23:35,708 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/C in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:35,709 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/dc63bf2ba86b492c87110e141ee6ceb9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/bbeb7d315d5949578f3fca61367fde4c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/604e2c6ac417422bb304623116e36588] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=33.0 K 2024-11-20T13:23:35,710 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting dc63bf2ba86b492c87110e141ee6ceb9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732109014158 2024-11-20T13:23:35,711 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting bbeb7d315d5949578f3fca61367fde4c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732109014823 2024-11-20T13:23:35,712 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 604e2c6ac417422bb304623116e36588, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732109014899 2024-11-20T13:23:35,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=14 2024-11-20T13:23:35,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109075739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109075741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109075741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109075742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,751 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T13:23:35,752 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-20T13:23:35,753 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-20T13:23:35,753 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-20T13:23:35,755 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T13:23:35,755 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-20T13:23:35,755 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T13:23:35,755 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-20T13:23:35,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741859_1035 (size=12001) 2024-11-20T13:23:35,757 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T13:23:35,757 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-20T13:23:35,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,758 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/b688ab5b865745da97bf42a25fa6adef 2024-11-20T13:23:35,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109075749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,769 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#C#compaction#21 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:35,772 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/0fd5ed5661a140799dbae356d3e575eb is 50, key is test_row_0/C:col10/1732109015237/Put/seqid=0 2024-11-20T13:23:35,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/83609fe96ac840f1973522b60bb15316 is 50, key is test_row_0/B:col10/1732109015407/Put/seqid=0 2024-11-20T13:23:35,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741860_1036 (size=12207) 2024-11-20T13:23:35,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741861_1037 (size=12001) 2024-11-20T13:23:35,842 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/0fd5ed5661a140799dbae356d3e575eb as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/0fd5ed5661a140799dbae356d3e575eb 2024-11-20T13:23:35,844 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/83609fe96ac840f1973522b60bb15316 2024-11-20T13:23:35,865 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/C of cbbdc72320da06253b5398d0c51c77ae into 0fd5ed5661a140799dbae356d3e575eb(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:23:35,865 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:35,865 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/C, priority=13, startTime=1732109015522; duration=0sec 2024-11-20T13:23:35,866 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:35,866 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:C 2024-11-20T13:23:35,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/cd51b962ac76454ab5594384998e0db8 is 50, key is test_row_0/C:col10/1732109015407/Put/seqid=0 2024-11-20T13:23:35,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741862_1038 (size=12001) 2024-11-20T13:23:35,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109075955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,959 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/cd51b962ac76454ab5594384998e0db8 2024-11-20T13:23:35,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109075956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109075948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/b688ab5b865745da97bf42a25fa6adef as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/b688ab5b865745da97bf42a25fa6adef 2024-11-20T13:23:35,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109075956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:35,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109075961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:35,992 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/b688ab5b865745da97bf42a25fa6adef, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T13:23:36,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/83609fe96ac840f1973522b60bb15316 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/83609fe96ac840f1973522b60bb15316 2024-11-20T13:23:36,014 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/83609fe96ac840f1973522b60bb15316, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T13:23:36,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/cd51b962ac76454ab5594384998e0db8 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/cd51b962ac76454ab5594384998e0db8 2024-11-20T13:23:36,042 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/cd51b962ac76454ab5594384998e0db8, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T13:23:36,046 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for cbbdc72320da06253b5398d0c51c77ae in 452ms, sequenceid=118, 
compaction requested=false 2024-11-20T13:23:36,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:36,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:36,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-20T13:23:36,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-20T13:23:36,055 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-20T13:23:36,055 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 940 msec 2024-11-20T13:23:36,059 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 953 msec 2024-11-20T13:23:36,075 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/63c2326d023d463cb2ed28e6794f24ca as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/63c2326d023d463cb2ed28e6794f24ca 2024-11-20T13:23:36,090 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/A of cbbdc72320da06253b5398d0c51c77ae into 63c2326d023d463cb2ed28e6794f24ca(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
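The entries above show master procedure pid=14 (FlushTableProcedure) and its per-region subprocedure pid=15 completing for TestAcidGuarantees. For orientation only, a minimal sketch of how such a flush can be requested through the public HBase client Admin API follows; the class name and connection setup are illustrative assumptions and are not taken from the test source.

```java
// Illustrative sketch (assumption): a synchronous table flush via the public Admin
// API, which drives the FlushTableProcedure / FlushRegionProcedure pair logged above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master reports the flush procedure done, matching the
      // "Operation: FLUSH, Table Name: default:TestAcidGuarantees ... completed" entry.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```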
2024-11-20T13:23:36,090 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:36,090 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/A, priority=13, startTime=1732109015522; duration=0sec 2024-11-20T13:23:36,090 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:36,090 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:A 2024-11-20T13:23:36,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T13:23:36,220 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-20T13:23:36,225 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:23:36,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-20T13:23:36,250 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:23:36,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T13:23:36,252 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:23:36,253 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:23:36,274 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T13:23:36,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:36,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:36,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:36,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:36,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:36,284 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:36,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:36,314 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/4a5282a3b95d43e0a2becb39e281ad9b is 50, key is test_row_0/A:col10/1732109016269/Put/seqid=0 2024-11-20T13:23:36,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T13:23:36,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741863_1039 (size=14441) 2024-11-20T13:23:36,363 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/4a5282a3b95d43e0a2becb39e281ad9b 2024-11-20T13:23:36,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/aa299044f09043a49dc1f69910fc2f1e is 50, key is test_row_0/B:col10/1732109016269/Put/seqid=0 2024-11-20T13:23:36,410 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:36,411 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T13:23:36,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:36,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:36,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:36,420 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:36,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:36,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:36,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:36,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109076408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:36,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:36,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109076415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:36,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:36,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109076426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:36,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:36,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109076430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:36,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:36,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109076406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:36,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741864_1040 (size=12051) 2024-11-20T13:23:36,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:36,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109076533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:36,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:36,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109076542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:36,544 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:36,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109076543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:36,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:36,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109076544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:36,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:36,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109076544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:36,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T13:23:36,576 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:36,579 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T13:23:36,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:36,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:36,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:36,580 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:36,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:36,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:36,734 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:36,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T13:23:36,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:36,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:36,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:36,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:36,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:36,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:36,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:36,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109076740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:36,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:36,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109076750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:36,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:36,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109076750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:36,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:36,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109076751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:36,772 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:36,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109076747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:36,851 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/aa299044f09043a49dc1f69910fc2f1e 2024-11-20T13:23:36,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T13:23:36,895 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/cfe492969720440ebc79fbeae68fe128 is 50, key is test_row_0/C:col10/1732109016269/Put/seqid=0 2024-11-20T13:23:36,896 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:36,901 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T13:23:36,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:36,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
as already flushing 2024-11-20T13:23:36,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:36,902 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:36,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:36,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:36,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741865_1041 (size=12051) 2024-11-20T13:23:36,937 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/cfe492969720440ebc79fbeae68fe128 2024-11-20T13:23:36,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/4a5282a3b95d43e0a2becb39e281ad9b as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/4a5282a3b95d43e0a2becb39e281ad9b 2024-11-20T13:23:36,982 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/4a5282a3b95d43e0a2becb39e281ad9b, entries=200, sequenceid=133, filesize=14.1 K 2024-11-20T13:23:36,986 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/aa299044f09043a49dc1f69910fc2f1e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/aa299044f09043a49dc1f69910fc2f1e 
2024-11-20T13:23:36,996 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/aa299044f09043a49dc1f69910fc2f1e, entries=150, sequenceid=133, filesize=11.8 K 2024-11-20T13:23:36,998 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/cfe492969720440ebc79fbeae68fe128 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/cfe492969720440ebc79fbeae68fe128 2024-11-20T13:23:37,011 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/cfe492969720440ebc79fbeae68fe128, entries=150, sequenceid=133, filesize=11.8 K 2024-11-20T13:23:37,013 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for cbbdc72320da06253b5398d0c51c77ae in 740ms, sequenceid=133, compaction requested=true 2024-11-20T13:23:37,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:37,014 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:37,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:23:37,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:37,015 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:37,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:23:37,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:37,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:23:37,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:37,016 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38649 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:37,017 DEBUG 
[RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/A is initiating minor compaction (all files) 2024-11-20T13:23:37,017 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/A in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:37,017 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/63c2326d023d463cb2ed28e6794f24ca, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/b688ab5b865745da97bf42a25fa6adef, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/4a5282a3b95d43e0a2becb39e281ad9b] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=37.7 K 2024-11-20T13:23:37,017 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:37,017 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/B is initiating minor compaction (all files) 2024-11-20T13:23:37,018 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/B in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:37,018 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63c2326d023d463cb2ed28e6794f24ca, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732109014823 2024-11-20T13:23:37,018 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d0d00a13478049ff8f131182c009b2f9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/83609fe96ac840f1973522b60bb15316, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/aa299044f09043a49dc1f69910fc2f1e] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=35.4 K 2024-11-20T13:23:37,018 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting b688ab5b865745da97bf42a25fa6adef, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732109015407 2024-11-20T13:23:37,019 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting d0d00a13478049ff8f131182c009b2f9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732109014823 2024-11-20T13:23:37,019 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a5282a3b95d43e0a2becb39e281ad9b, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732109015609 2024-11-20T13:23:37,020 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 83609fe96ac840f1973522b60bb15316, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732109015407 2024-11-20T13:23:37,023 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting aa299044f09043a49dc1f69910fc2f1e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732109015609 2024-11-20T13:23:37,052 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#A#compaction#27 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:37,053 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/9ca469f82d0a4db3b92d50c4ffd99449 is 50, key is test_row_0/A:col10/1732109016269/Put/seqid=0 2024-11-20T13:23:37,066 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,082 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#B#compaction#28 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:37,083 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/edaf0aa1aa8945aa916a5a9ac405195e is 50, key is test_row_0/B:col10/1732109016269/Put/seqid=0 2024-11-20T13:23:37,094 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T13:23:37,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:37,095 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T13:23:37,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:37,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
as already flushing 2024-11-20T13:23:37,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:37,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:37,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:37,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:37,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:37,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:37,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741866_1042 (size=12359) 2024-11-20T13:23:37,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:37,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109077133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:37,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109077145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:37,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109077146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:37,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109077149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:37,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741867_1043 (size=12359) 2024-11-20T13:23:37,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/9b933139d12b48988edbd809f1cbe969 is 50, key is test_row_0/A:col10/1732109016406/Put/seqid=0 2024-11-20T13:23:37,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109077141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,220 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/edaf0aa1aa8945aa916a5a9ac405195e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/edaf0aa1aa8945aa916a5a9ac405195e 2024-11-20T13:23:37,235 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/B of cbbdc72320da06253b5398d0c51c77ae into edaf0aa1aa8945aa916a5a9ac405195e(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:23:37,235 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:37,235 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/B, priority=13, startTime=1732109017014; duration=0sec 2024-11-20T13:23:37,235 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:37,235 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:B 2024-11-20T13:23:37,236 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:37,240 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:37,240 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/C is initiating minor compaction (all files) 2024-11-20T13:23:37,240 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/C in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:37,240 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/0fd5ed5661a140799dbae356d3e575eb, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/cd51b962ac76454ab5594384998e0db8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/cfe492969720440ebc79fbeae68fe128] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=35.4 K 2024-11-20T13:23:37,241 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 0fd5ed5661a140799dbae356d3e575eb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732109014823 2024-11-20T13:23:37,242 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting cd51b962ac76454ab5594384998e0db8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732109015407 2024-11-20T13:23:37,243 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting cfe492969720440ebc79fbeae68fe128, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732109015609 2024-11-20T13:23:37,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is 
added to blk_1073741868_1044 (size=14541) 2024-11-20T13:23:37,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:37,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109077249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,259 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/9b933139d12b48988edbd809f1cbe969 2024-11-20T13:23:37,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:37,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109077258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:37,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109077261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:37,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109077261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,279 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#C#compaction#30 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:37,281 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/43f33a4976344584ba46152c5be414c5 is 50, key is test_row_0/C:col10/1732109016269/Put/seqid=0 2024-11-20T13:23:37,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/2c84ef22b73049cea22fd707ad3d567e is 50, key is test_row_0/B:col10/1732109016406/Put/seqid=0 2024-11-20T13:23:37,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:37,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109077291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741869_1045 (size=12359) 2024-11-20T13:23:37,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741870_1046 (size=12151) 2024-11-20T13:23:37,351 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/2c84ef22b73049cea22fd707ad3d567e 2024-11-20T13:23:37,363 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/43f33a4976344584ba46152c5be414c5 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/43f33a4976344584ba46152c5be414c5 2024-11-20T13:23:37,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T13:23:37,380 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/C of cbbdc72320da06253b5398d0c51c77ae into 43f33a4976344584ba46152c5be414c5(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:23:37,380 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:37,380 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/C, priority=13, startTime=1732109017015; duration=0sec 2024-11-20T13:23:37,381 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:37,381 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:C 2024-11-20T13:23:37,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/c3d121ac0ed34e3893a46a168eaf7115 is 50, key is test_row_0/C:col10/1732109016406/Put/seqid=0 2024-11-20T13:23:37,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741871_1047 (size=12151) 2024-11-20T13:23:37,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:37,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109077459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:37,473 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:37,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109077468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109077467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,474 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:37,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109077468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:37,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109077498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,527 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/9ca469f82d0a4db3b92d50c4ffd99449 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9ca469f82d0a4db3b92d50c4ffd99449 2024-11-20T13:23:37,547 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/A of cbbdc72320da06253b5398d0c51c77ae into 9ca469f82d0a4db3b92d50c4ffd99449(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:23:37,547 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:37,547 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/A, priority=13, startTime=1732109017014; duration=0sec 2024-11-20T13:23:37,547 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:37,547 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:A 2024-11-20T13:23:37,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:37,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109077770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:37,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109077777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:37,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109077779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:37,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109077778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,811 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:37,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109077808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:37,831 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/c3d121ac0ed34e3893a46a168eaf7115 2024-11-20T13:23:37,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/9b933139d12b48988edbd809f1cbe969 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9b933139d12b48988edbd809f1cbe969 2024-11-20T13:23:37,870 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9b933139d12b48988edbd809f1cbe969, entries=200, sequenceid=159, filesize=14.2 K 2024-11-20T13:23:37,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/2c84ef22b73049cea22fd707ad3d567e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/2c84ef22b73049cea22fd707ad3d567e 2024-11-20T13:23:37,886 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/2c84ef22b73049cea22fd707ad3d567e, entries=150, sequenceid=159, filesize=11.9 K 2024-11-20T13:23:37,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/c3d121ac0ed34e3893a46a168eaf7115 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/c3d121ac0ed34e3893a46a168eaf7115 2024-11-20T13:23:37,908 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/c3d121ac0ed34e3893a46a168eaf7115, entries=150, sequenceid=159, filesize=11.9 K 2024-11-20T13:23:37,912 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for cbbdc72320da06253b5398d0c51c77ae in 817ms, sequenceid=159, compaction requested=false 2024-11-20T13:23:37,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:37,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:37,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-20T13:23:37,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-20T13:23:37,932 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-20T13:23:37,932 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6680 sec 2024-11-20T13:23:37,936 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.7080 sec 2024-11-20T13:23:38,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:38,308 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T13:23:38,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:38,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:38,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:38,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:38,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:38,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:38,371 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/fd56f71b82474865b85708891bdd6326 is 50, key is test_row_0/A:col10/1732109018303/Put/seqid=0 2024-11-20T13:23:38,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T13:23:38,373 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-20T13:23:38,385 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:23:38,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-20T13:23:38,388 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:23:38,393 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:23:38,393 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:23:38,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T13:23:38,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741872_1048 (size=12151) 2024-11-20T13:23:38,440 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/fd56f71b82474865b85708891bdd6326 2024-11-20T13:23:38,473 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:38,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109078443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:38,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:38,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109078452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:38,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:38,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109078460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:38,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:38,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109078473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:38,493 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/c7c67122e4524ca9baa9d2cb63e720f1 is 50, key is test_row_0/B:col10/1732109018303/Put/seqid=0 2024-11-20T13:23:38,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T13:23:38,507 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:38,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109078484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:38,549 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:38,550 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T13:23:38,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:38,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:38,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:38,551 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:38,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:38,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:38,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:38,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109078577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:38,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741873_1049 (size=12151) 2024-11-20T13:23:38,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:38,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:38,624 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:38,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109078581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:38,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109078598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:38,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:38,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109078593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:38,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109078610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:38,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T13:23:38,705 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:38,706 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T13:23:38,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:38,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:38,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:38,707 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:38,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:38,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:38,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:38,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109078802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:38,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:38,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109078834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:38,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:38,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:38,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109078834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:38,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109078833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:38,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:38,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109078836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:38,885 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:38,885 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T13:23:38,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:38,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:38,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:38,886 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:38,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:38,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:39,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T13:23:39,008 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/c7c67122e4524ca9baa9d2cb63e720f1 2024-11-20T13:23:39,060 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:39,061 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T13:23:39,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:39,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:39,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:39,066 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:39,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:39,093 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/83ebb54179c041ceb396c075c5bef4d6 is 50, key is test_row_0/C:col10/1732109018303/Put/seqid=0 2024-11-20T13:23:39,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:39,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109079118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:39,157 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:39,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109079149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:39,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741874_1050 (size=12151) 2024-11-20T13:23:39,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:39,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109079154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:39,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:39,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109079154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:39,176 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/83ebb54179c041ceb396c075c5bef4d6 2024-11-20T13:23:39,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:39,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109079161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:39,204 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/fd56f71b82474865b85708891bdd6326 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/fd56f71b82474865b85708891bdd6326 2024-11-20T13:23:39,217 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/fd56f71b82474865b85708891bdd6326, entries=150, sequenceid=175, filesize=11.9 K 2024-11-20T13:23:39,218 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/c7c67122e4524ca9baa9d2cb63e720f1 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c7c67122e4524ca9baa9d2cb63e720f1 2024-11-20T13:23:39,224 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:39,225 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T13:23:39,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:39,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:39,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:39,225 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:39,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:39,233 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c7c67122e4524ca9baa9d2cb63e720f1, entries=150, sequenceid=175, filesize=11.9 K 2024-11-20T13:23:39,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:39,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/83ebb54179c041ceb396c075c5bef4d6 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/83ebb54179c041ceb396c075c5bef4d6 2024-11-20T13:23:39,281 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/83ebb54179c041ceb396c075c5bef4d6, entries=150, sequenceid=175, filesize=11.9 K 2024-11-20T13:23:39,285 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for cbbdc72320da06253b5398d0c51c77ae in 976ms, sequenceid=175, compaction requested=true 2024-11-20T13:23:39,285 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:39,285 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:39,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:23:39,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), 
splitQueue=0 2024-11-20T13:23:39,286 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:39,288 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39051 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:39,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:23:39,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:39,288 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/A is initiating minor compaction (all files) 2024-11-20T13:23:39,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:23:39,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:39,288 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/A in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:39,288 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9ca469f82d0a4db3b92d50c4ffd99449, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9b933139d12b48988edbd809f1cbe969, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/fd56f71b82474865b85708891bdd6326] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=38.1 K 2024-11-20T13:23:39,289 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:39,289 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/B is initiating minor compaction (all files) 2024-11-20T13:23:39,289 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/B in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:39,290 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/edaf0aa1aa8945aa916a5a9ac405195e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/2c84ef22b73049cea22fd707ad3d567e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c7c67122e4524ca9baa9d2cb63e720f1] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=35.8 K 2024-11-20T13:23:39,290 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ca469f82d0a4db3b92d50c4ffd99449, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732109015609 2024-11-20T13:23:39,290 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting edaf0aa1aa8945aa916a5a9ac405195e, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732109015609 2024-11-20T13:23:39,291 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b933139d12b48988edbd809f1cbe969, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732109016380 2024-11-20T13:23:39,291 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c84ef22b73049cea22fd707ad3d567e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732109016404 2024-11-20T13:23:39,292 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd56f71b82474865b85708891bdd6326, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732109017141 2024-11-20T13:23:39,293 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting c7c67122e4524ca9baa9d2cb63e720f1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732109017141 2024-11-20T13:23:39,331 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#B#compaction#36 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:39,334 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#A#compaction#37 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:39,336 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/6db885bb1e964275b2fbb4bc8366ce95 is 50, key is test_row_0/A:col10/1732109018303/Put/seqid=0 2024-11-20T13:23:39,332 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/52a6a661208c45de81e7f18bb8fb647a is 50, key is test_row_0/B:col10/1732109018303/Put/seqid=0 2024-11-20T13:23:39,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741875_1051 (size=12561) 2024-11-20T13:23:39,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741876_1052 (size=12561) 2024-11-20T13:23:39,396 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:39,397 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T13:23:39,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:39,398 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T13:23:39,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:39,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:39,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:39,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:39,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:39,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:39,419 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/52a6a661208c45de81e7f18bb8fb647a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/52a6a661208c45de81e7f18bb8fb647a 2024-11-20T13:23:39,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/53ea0b65940b4060abd2a8382b24f416 is 50, key is test_row_0/A:col10/1732109018466/Put/seqid=0 2024-11-20T13:23:39,459 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/B of cbbdc72320da06253b5398d0c51c77ae into 52a6a661208c45de81e7f18bb8fb647a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:23:39,459 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:39,459 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/B, priority=13, startTime=1732109019286; duration=0sec 2024-11-20T13:23:39,459 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:39,459 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:B 2024-11-20T13:23:39,459 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:39,461 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:39,462 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/C is initiating minor compaction (all files) 2024-11-20T13:23:39,462 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/C in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:39,462 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/43f33a4976344584ba46152c5be414c5, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/c3d121ac0ed34e3893a46a168eaf7115, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/83ebb54179c041ceb396c075c5bef4d6] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=35.8 K 2024-11-20T13:23:39,463 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 43f33a4976344584ba46152c5be414c5, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732109015609 2024-11-20T13:23:39,463 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting c3d121ac0ed34e3893a46a168eaf7115, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732109016404 2024-11-20T13:23:39,465 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 83ebb54179c041ceb396c075c5bef4d6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732109017141 2024-11-20T13:23:39,478 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/6db885bb1e964275b2fbb4bc8366ce95 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/6db885bb1e964275b2fbb4bc8366ce95 2024-11-20T13:23:39,499 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/A of cbbdc72320da06253b5398d0c51c77ae into 6db885bb1e964275b2fbb4bc8366ce95(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:23:39,499 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:39,499 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/A, priority=13, startTime=1732109019285; duration=0sec 2024-11-20T13:23:39,499 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:39,499 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:A 2024-11-20T13:23:39,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T13:23:39,513 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#C#compaction#39 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:39,514 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/f7f52ba60a7a4f249c494f3f59d31646 is 50, key is test_row_0/C:col10/1732109018303/Put/seqid=0 2024-11-20T13:23:39,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741877_1053 (size=12151) 2024-11-20T13:23:39,541 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/53ea0b65940b4060abd2a8382b24f416 2024-11-20T13:23:39,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/9772a4a7ad434beba149fe945da7eb5d is 50, key is test_row_0/B:col10/1732109018466/Put/seqid=0 2024-11-20T13:23:39,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741878_1054 (size=12561) 2024-11-20T13:23:39,647 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/f7f52ba60a7a4f249c494f3f59d31646 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/f7f52ba60a7a4f249c494f3f59d31646 2024-11-20T13:23:39,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:39,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:39,663 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/C of cbbdc72320da06253b5398d0c51c77ae into f7f52ba60a7a4f249c494f3f59d31646(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:23:39,663 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:39,663 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/C, priority=13, startTime=1732109019288; duration=0sec 2024-11-20T13:23:39,663 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:39,663 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:C 2024-11-20T13:23:39,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741879_1055 (size=12151) 2024-11-20T13:23:39,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:39,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109079716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:39,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:39,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109079717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:39,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:39,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109079728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:39,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:39,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109079731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:39,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:39,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109079736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:39,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:39,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109079830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:39,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:39,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109079847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:39,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:39,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109079852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:39,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:39,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109079854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:39,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:39,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109079852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:40,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109080053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:40,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109080059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:40,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109080062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:40,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109080075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:40,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109080070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,089 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/9772a4a7ad434beba149fe945da7eb5d 2024-11-20T13:23:40,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/4cdc6b38f8b340e09a2389de2bb1481b is 50, key is test_row_0/C:col10/1732109018466/Put/seqid=0 2024-11-20T13:23:40,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741880_1056 (size=12151) 2024-11-20T13:23:40,216 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/4cdc6b38f8b340e09a2389de2bb1481b 2024-11-20T13:23:40,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/53ea0b65940b4060abd2a8382b24f416 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/53ea0b65940b4060abd2a8382b24f416 2024-11-20T13:23:40,298 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/53ea0b65940b4060abd2a8382b24f416, entries=150, sequenceid=199, filesize=11.9 K 2024-11-20T13:23:40,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/9772a4a7ad434beba149fe945da7eb5d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/9772a4a7ad434beba149fe945da7eb5d 2024-11-20T13:23:40,354 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/9772a4a7ad434beba149fe945da7eb5d, entries=150, sequenceid=199, filesize=11.9 K 2024-11-20T13:23:40,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/4cdc6b38f8b340e09a2389de2bb1481b as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/4cdc6b38f8b340e09a2389de2bb1481b 2024-11-20T13:23:40,382 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/4cdc6b38f8b340e09a2389de2bb1481b, entries=150, sequenceid=199, filesize=11.9 K 2024-11-20T13:23:40,384 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=73.80 KB/75570 for cbbdc72320da06253b5398d0c51c77ae in 986ms, sequenceid=199, compaction requested=false 2024-11-20T13:23:40,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:40,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:40,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-20T13:23:40,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-20T13:23:40,398 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-20T13:23:40,398 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0020 sec 2024-11-20T13:23:40,405 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 2.0150 sec 2024-11-20T13:23:40,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T13:23:40,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:40,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:40,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:40,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:40,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:40,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:40,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:40,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/b767cef5c7bf467f80c75776097c7abd is 50, key is test_row_0/A:col10/1732109019719/Put/seqid=0 2024-11-20T13:23:40,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T13:23:40,546 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-20T13:23:40,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741881_1057 (size=16931) 2024-11-20T13:23:40,563 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:23:40,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-20T13:23:40,570 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=20, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:23:40,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T13:23:40,572 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:23:40,572 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:23:40,583 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:40,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109080574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:40,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109080575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,587 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:40,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109080578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:40,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109080583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:40,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109080579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T13:23:40,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:40,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109080695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,701 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:40,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109080698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:40,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109080706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,727 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:40,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109080717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,728 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T13:23:40,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:40,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:40,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:40,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:40,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109080705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,730 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:40,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:40,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:40,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T13:23:40,884 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,888 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T13:23:40,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:40,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:40,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:40,889 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:40,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:40,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:40,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:40,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109080906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:40,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109080913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:40,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109080934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,936 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:40,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109080934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:40,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109080941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:40,962 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=218 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/b767cef5c7bf467f80c75776097c7abd 2024-11-20T13:23:40,994 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/65549fbb975449e799cc9e1b49af9967 is 50, key is test_row_0/B:col10/1732109019719/Put/seqid=0 2024-11-20T13:23:41,043 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:41,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T13:23:41,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:41,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:41,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:41,051 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:41,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:41,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:41,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741882_1058 (size=12151) 2024-11-20T13:23:41,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T13:23:41,205 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:41,206 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T13:23:41,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:41,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:41,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:41,207 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:41,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:41,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:41,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:41,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109081229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:41,242 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:41,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109081237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:41,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:41,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109081239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:41,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:41,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109081239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:41,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:41,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109081262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:41,364 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:41,365 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T13:23:41,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:41,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:41,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:41,368 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:41,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:41,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:41,469 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=218 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/65549fbb975449e799cc9e1b49af9967 2024-11-20T13:23:41,497 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/d793372f8d2f44d8b85fe0c3a0332ab8 is 50, key is test_row_0/C:col10/1732109019719/Put/seqid=0 2024-11-20T13:23:41,522 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:41,531 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T13:23:41,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:41,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:41,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:41,532 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:41,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:41,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:41,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741883_1059 (size=12151) 2024-11-20T13:23:41,574 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=218 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/d793372f8d2f44d8b85fe0c3a0332ab8 2024-11-20T13:23:41,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/b767cef5c7bf467f80c75776097c7abd as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/b767cef5c7bf467f80c75776097c7abd 2024-11-20T13:23:41,604 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/b767cef5c7bf467f80c75776097c7abd, entries=250, sequenceid=218, filesize=16.5 K 2024-11-20T13:23:41,606 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/65549fbb975449e799cc9e1b49af9967 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/65549fbb975449e799cc9e1b49af9967 2024-11-20T13:23:41,617 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/65549fbb975449e799cc9e1b49af9967, entries=150, sequenceid=218, filesize=11.9 K 2024-11-20T13:23:41,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/d793372f8d2f44d8b85fe0c3a0332ab8 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/d793372f8d2f44d8b85fe0c3a0332ab8 2024-11-20T13:23:41,644 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/d793372f8d2f44d8b85fe0c3a0332ab8, entries=150, sequenceid=218, filesize=11.9 K 2024-11-20T13:23:41,650 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for cbbdc72320da06253b5398d0c51c77ae in 1242ms, sequenceid=218, compaction requested=true 2024-11-20T13:23:41,651 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:41,651 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:41,653 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41643 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:41,653 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/A is initiating minor compaction (all files) 2024-11-20T13:23:41,653 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/A in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:41,653 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/6db885bb1e964275b2fbb4bc8366ce95, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/53ea0b65940b4060abd2a8382b24f416, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/b767cef5c7bf467f80c75776097c7abd] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=40.7 K 2024-11-20T13:23:41,654 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6db885bb1e964275b2fbb4bc8366ce95, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732109017141 2024-11-20T13:23:41,655 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53ea0b65940b4060abd2a8382b24f416, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732109018451 2024-11-20T13:23:41,655 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting b767cef5c7bf467f80c75776097c7abd, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1732109019693 2024-11-20T13:23:41,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:23:41,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:41,666 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:41,667 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:41,667 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/B is initiating minor compaction (all files) 2024-11-20T13:23:41,667 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/B in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:41,668 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/52a6a661208c45de81e7f18bb8fb647a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/9772a4a7ad434beba149fe945da7eb5d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/65549fbb975449e799cc9e1b49af9967] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=36.0 K 2024-11-20T13:23:41,668 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 52a6a661208c45de81e7f18bb8fb647a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732109017141 2024-11-20T13:23:41,669 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 9772a4a7ad434beba149fe945da7eb5d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732109018451 2024-11-20T13:23:41,669 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 65549fbb975449e799cc9e1b49af9967, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1732109019719 2024-11-20T13:23:41,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T13:23:41,681 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#A#compaction#45 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:41,682 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/b5a950a42fed4530a3325ae2ca77885d is 50, key is test_row_0/A:col10/1732109019719/Put/seqid=0 2024-11-20T13:23:41,685 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:41,686 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T13:23:41,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:41,687 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T13:23:41,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:41,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:41,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:41,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:41,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:41,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:41,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:23:41,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:41,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:23:41,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:41,699 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#B#compaction#46 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:41,700 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/d1b1bb577af64b46bcf444c319165722 is 50, key is test_row_0/B:col10/1732109019719/Put/seqid=0 2024-11-20T13:23:41,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/347b899aa9a2483487a0887f32e82dbe is 50, key is test_row_0/A:col10/1732109020565/Put/seqid=0 2024-11-20T13:23:41,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:41,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:41,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741884_1060 (size=12663) 2024-11-20T13:23:41,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741885_1061 (size=12663) 2024-11-20T13:23:41,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741886_1062 (size=12151) 2024-11-20T13:23:41,844 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/b5a950a42fed4530a3325ae2ca77885d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/b5a950a42fed4530a3325ae2ca77885d 2024-11-20T13:23:41,847 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/347b899aa9a2483487a0887f32e82dbe 2024-11-20T13:23:41,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:41,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109081812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:41,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:41,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109081836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:41,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:41,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109081850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:41,864 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/d1b1bb577af64b46bcf444c319165722 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d1b1bb577af64b46bcf444c319165722 2024-11-20T13:23:41,870 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/A of cbbdc72320da06253b5398d0c51c77ae into b5a950a42fed4530a3325ae2ca77885d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:23:41,870 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:41,870 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/A, priority=13, startTime=1732109021651; duration=0sec 2024-11-20T13:23:41,870 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:41,870 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:A 2024-11-20T13:23:41,870 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:41,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/d0cd3ed54f144c3ea0d32591400648eb is 50, key is test_row_0/B:col10/1732109020565/Put/seqid=0 2024-11-20T13:23:41,879 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:41,879 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/C is initiating minor compaction (all files) 2024-11-20T13:23:41,879 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/C in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:41,879 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/f7f52ba60a7a4f249c494f3f59d31646, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/4cdc6b38f8b340e09a2389de2bb1481b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/d793372f8d2f44d8b85fe0c3a0332ab8] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=36.0 K 2024-11-20T13:23:41,880 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7f52ba60a7a4f249c494f3f59d31646, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732109017141 2024-11-20T13:23:41,881 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4cdc6b38f8b340e09a2389de2bb1481b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732109018451 2024-11-20T13:23:41,883 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/B of cbbdc72320da06253b5398d0c51c77ae into d1b1bb577af64b46bcf444c319165722(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:23:41,883 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting d793372f8d2f44d8b85fe0c3a0332ab8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1732109019719 2024-11-20T13:23:41,883 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:41,883 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/B, priority=13, startTime=1732109021666; duration=0sec 2024-11-20T13:23:41,883 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:41,883 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:B 2024-11-20T13:23:41,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:41,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109081861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:41,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:41,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109081860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:41,918 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#C#compaction#49 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:41,919 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/aaf8890ee98f4c1b9dcce2468c5efaa4 is 50, key is test_row_0/C:col10/1732109019719/Put/seqid=0 2024-11-20T13:23:41,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741887_1063 (size=12151) 2024-11-20T13:23:41,927 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/d0cd3ed54f144c3ea0d32591400648eb 2024-11-20T13:23:41,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:41,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109081961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:41,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:41,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109081967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:41,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:41,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109081968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:41,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/e626963d4cef4441a0f51a14dc207f22 is 50, key is test_row_0/C:col10/1732109020565/Put/seqid=0 2024-11-20T13:23:41,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741888_1064 (size=12663) 2024-11-20T13:23:42,018 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/aaf8890ee98f4c1b9dcce2468c5efaa4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/aaf8890ee98f4c1b9dcce2468c5efaa4 2024-11-20T13:23:42,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109082011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109082025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,033 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/C of cbbdc72320da06253b5398d0c51c77ae into aaf8890ee98f4c1b9dcce2468c5efaa4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:23:42,033 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:42,033 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/C, priority=13, startTime=1732109021692; duration=0sec 2024-11-20T13:23:42,033 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:42,033 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:C 2024-11-20T13:23:42,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741889_1065 (size=12151) 2024-11-20T13:23:42,066 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/e626963d4cef4441a0f51a14dc207f22 2024-11-20T13:23:42,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/347b899aa9a2483487a0887f32e82dbe as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/347b899aa9a2483487a0887f32e82dbe 2024-11-20T13:23:42,085 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/347b899aa9a2483487a0887f32e82dbe, entries=150, sequenceid=239, filesize=11.9 K 2024-11-20T13:23:42,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/d0cd3ed54f144c3ea0d32591400648eb as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d0cd3ed54f144c3ea0d32591400648eb 2024-11-20T13:23:42,093 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d0cd3ed54f144c3ea0d32591400648eb, entries=150, sequenceid=239, filesize=11.9 K 2024-11-20T13:23:42,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/e626963d4cef4441a0f51a14dc207f22 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/e626963d4cef4441a0f51a14dc207f22 2024-11-20T13:23:42,107 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/e626963d4cef4441a0f51a14dc207f22, entries=150, sequenceid=239, filesize=11.9 K 2024-11-20T13:23:42,108 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for cbbdc72320da06253b5398d0c51c77ae in 422ms, sequenceid=239, compaction requested=false 2024-11-20T13:23:42,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:42,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:42,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-20T13:23:42,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-20T13:23:42,113 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-20T13:23:42,113 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5380 sec 2024-11-20T13:23:42,116 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.5510 sec 2024-11-20T13:23:42,181 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T13:23:42,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:42,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:42,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:42,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:42,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:42,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-20T13:23:42,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:42,193 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/5afb8958c9254586b305f32b388136d6 is 50, key is test_row_0/A:col10/1732109021851/Put/seqid=0 2024-11-20T13:23:42,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109082229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741890_1066 (size=14591) 2024-11-20T13:23:42,242 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/5afb8958c9254586b305f32b388136d6 2024-11-20T13:23:42,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109082233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109082233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,247 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109082238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109082236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/c061b124668e4ded90dfbddfa796071e is 50, key is test_row_0/B:col10/1732109021851/Put/seqid=0 2024-11-20T13:23:42,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741891_1067 (size=12201) 2024-11-20T13:23:42,339 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/c061b124668e4ded90dfbddfa796071e 2024-11-20T13:23:42,346 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109082343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,358 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/25bdf48243044d658edbb40871d54325 is 50, key is test_row_0/C:col10/1732109021851/Put/seqid=0 2024-11-20T13:23:42,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109082349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109082350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109082351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741892_1068 (size=12201) 2024-11-20T13:23:42,419 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/25bdf48243044d658edbb40871d54325 2024-11-20T13:23:42,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/5afb8958c9254586b305f32b388136d6 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/5afb8958c9254586b305f32b388136d6 2024-11-20T13:23:42,461 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/5afb8958c9254586b305f32b388136d6, entries=200, sequenceid=259, filesize=14.2 K 2024-11-20T13:23:42,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/c061b124668e4ded90dfbddfa796071e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c061b124668e4ded90dfbddfa796071e 2024-11-20T13:23:42,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c061b124668e4ded90dfbddfa796071e, entries=150, sequenceid=259, filesize=11.9 K 2024-11-20T13:23:42,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/25bdf48243044d658edbb40871d54325 as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/25bdf48243044d658edbb40871d54325 2024-11-20T13:23:42,486 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/25bdf48243044d658edbb40871d54325, entries=150, sequenceid=259, filesize=11.9 K 2024-11-20T13:23:42,492 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for cbbdc72320da06253b5398d0c51c77ae in 312ms, sequenceid=259, compaction requested=true 2024-11-20T13:23:42,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:42,493 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:42,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:23:42,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:42,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:23:42,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:42,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:23:42,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T13:23:42,499 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:42,501 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39405 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:42,502 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/A is initiating minor compaction (all files) 2024-11-20T13:23:42,502 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/A in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:42,502 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/b5a950a42fed4530a3325ae2ca77885d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/347b899aa9a2483487a0887f32e82dbe, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/5afb8958c9254586b305f32b388136d6] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=38.5 K 2024-11-20T13:23:42,503 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5a950a42fed4530a3325ae2ca77885d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1732109019719 2024-11-20T13:23:42,503 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 347b899aa9a2483487a0887f32e82dbe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732109020555 2024-11-20T13:23:42,504 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5afb8958c9254586b305f32b388136d6, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732109021789 2024-11-20T13:23:42,510 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37015 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:42,510 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/B is initiating minor compaction (all files) 2024-11-20T13:23:42,510 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/B in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:42,510 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d1b1bb577af64b46bcf444c319165722, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d0cd3ed54f144c3ea0d32591400648eb, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c061b124668e4ded90dfbddfa796071e] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=36.1 K 2024-11-20T13:23:42,517 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting d1b1bb577af64b46bcf444c319165722, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1732109019719 2024-11-20T13:23:42,518 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting d0cd3ed54f144c3ea0d32591400648eb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732109020555 2024-11-20T13:23:42,518 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting c061b124668e4ded90dfbddfa796071e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732109021789 2024-11-20T13:23:42,535 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#A#compaction#54 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:42,536 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/2d3fe302fa4b475797cd4cb196e18a3c is 50, key is test_row_0/A:col10/1732109021851/Put/seqid=0 2024-11-20T13:23:42,551 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#B#compaction#55 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:42,552 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/49876362d5a443059d015428fb5962ce is 50, key is test_row_0/B:col10/1732109021851/Put/seqid=0 2024-11-20T13:23:42,562 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T13:23:42,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:42,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:42,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:42,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:42,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:42,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:42,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:42,594 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/bf999d7263b84a7bbf8d8ac33872bd9a is 50, key is test_row_0/A:col10/1732109022236/Put/seqid=0 2024-11-20T13:23:42,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741893_1069 (size=12815) 2024-11-20T13:23:42,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741894_1070 (size=12815) 2024-11-20T13:23:42,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109082624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,670 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/49876362d5a443059d015428fb5962ce as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/49876362d5a443059d015428fb5962ce 2024-11-20T13:23:42,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109082651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T13:23:42,681 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-20T13:23:42,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109082660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109082675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,686 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:23:42,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-20T13:23:42,695 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:23:42,695 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/B of cbbdc72320da06253b5398d0c51c77ae into 49876362d5a443059d015428fb5962ce(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:23:42,695 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:42,695 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/B, priority=13, startTime=1732109022493; duration=0sec 2024-11-20T13:23:42,695 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:42,695 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:B 2024-11-20T13:23:42,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T13:23:42,695 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:42,696 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:23:42,696 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:23:42,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K 
connection: 172.17.0.2:40252 deadline: 1732109082676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,703 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37015 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:42,703 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/C is initiating minor compaction (all files) 2024-11-20T13:23:42,703 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/C in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:42,703 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/aaf8890ee98f4c1b9dcce2468c5efaa4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/e626963d4cef4441a0f51a14dc207f22, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/25bdf48243044d658edbb40871d54325] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=36.1 K 2024-11-20T13:23:42,704 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting aaf8890ee98f4c1b9dcce2468c5efaa4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1732109019719 2024-11-20T13:23:42,706 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting e626963d4cef4441a0f51a14dc207f22, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732109020555 2024-11-20T13:23:42,706 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 25bdf48243044d658edbb40871d54325, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732109021789 2024-11-20T13:23:42,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741895_1071 (size=14741) 2024-11-20T13:23:42,715 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/bf999d7263b84a7bbf8d8ac33872bd9a 2024-11-20T13:23:42,753 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#C#compaction#57 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:42,754 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/e51cbb68c27c4c56b0873925bf4026f8 is 50, key is test_row_0/C:col10/1732109021851/Put/seqid=0 2024-11-20T13:23:42,761 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/6562944edfea41c6905b85be6f10f762 is 50, key is test_row_0/B:col10/1732109022236/Put/seqid=0 2024-11-20T13:23:42,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109082756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T13:23:42,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109082776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109082787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109082788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,815 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:42,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109082808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741896_1072 (size=12815) 2024-11-20T13:23:42,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741897_1073 (size=12301) 2024-11-20T13:23:42,832 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/6562944edfea41c6905b85be6f10f762 2024-11-20T13:23:42,849 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:42,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T13:23:42,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:42,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:42,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:42,851 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:42,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:42,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:42,864 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/8c3832b7ea2f4f9d8031174e55c48c99 is 50, key is test_row_0/C:col10/1732109022236/Put/seqid=0 2024-11-20T13:23:42,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741898_1074 (size=12301) 2024-11-20T13:23:43,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T13:23:43,008 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:43,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:43,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109083003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:43,011 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T13:23:43,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:43,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:43,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:43,011 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:43,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:43,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:43,018 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/2d3fe302fa4b475797cd4cb196e18a3c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/2d3fe302fa4b475797cd4cb196e18a3c 2024-11-20T13:23:43,018 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:43,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109083010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:43,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:43,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109083017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:43,027 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/A of cbbdc72320da06253b5398d0c51c77ae into 2d3fe302fa4b475797cd4cb196e18a3c(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:23:43,027 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:43,028 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/A, priority=13, startTime=1732109022493; duration=0sec 2024-11-20T13:23:43,028 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:43,028 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:A 2024-11-20T13:23:43,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:43,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109083021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:43,036 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:43,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109083029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:43,168 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:43,169 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T13:23:43,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:43,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:43,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:43,169 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:43,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:43,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:43,233 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/e51cbb68c27c4c56b0873925bf4026f8 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/e51cbb68c27c4c56b0873925bf4026f8 2024-11-20T13:23:43,242 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/C of cbbdc72320da06253b5398d0c51c77ae into e51cbb68c27c4c56b0873925bf4026f8(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:23:43,242 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:43,242 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/C, priority=13, startTime=1732109022493; duration=0sec 2024-11-20T13:23:43,242 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:43,242 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:C 2024-11-20T13:23:43,304 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/8c3832b7ea2f4f9d8031174e55c48c99 2024-11-20T13:23:43,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T13:23:43,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/bf999d7263b84a7bbf8d8ac33872bd9a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/bf999d7263b84a7bbf8d8ac33872bd9a 2024-11-20T13:23:43,324 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:43,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:43,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109083315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:43,325 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T13:23:43,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:43,325 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/bf999d7263b84a7bbf8d8ac33872bd9a, entries=200, sequenceid=279, filesize=14.4 K 2024-11-20T13:23:43,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:43,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:43,326 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:43,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:43,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:43,327 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/6562944edfea41c6905b85be6f10f762 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/6562944edfea41c6905b85be6f10f762 2024-11-20T13:23:43,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:43,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109083322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:43,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:43,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109083323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:43,334 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/6562944edfea41c6905b85be6f10f762, entries=150, sequenceid=279, filesize=12.0 K 2024-11-20T13:23:43,335 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/8c3832b7ea2f4f9d8031174e55c48c99 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/8c3832b7ea2f4f9d8031174e55c48c99 2024-11-20T13:23:43,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:43,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109083342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:43,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:43,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109083346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:43,351 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/8c3832b7ea2f4f9d8031174e55c48c99, entries=150, sequenceid=279, filesize=12.0 K 2024-11-20T13:23:43,353 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for cbbdc72320da06253b5398d0c51c77ae in 792ms, sequenceid=279, compaction requested=false 2024-11-20T13:23:43,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:43,479 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:43,480 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T13:23:43,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:43,483 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T13:23:43,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:43,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:43,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:43,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:43,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:43,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:43,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/99baa019e97f493a95526566764de8d1 is 50, key is test_row_0/A:col10/1732109022646/Put/seqid=0 2024-11-20T13:23:43,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741899_1075 (size=12301) 2024-11-20T13:23:43,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T13:23:43,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:43,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:43,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:43,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109083935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:43,960 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/99baa019e97f493a95526566764de8d1 2024-11-20T13:23:43,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:43,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109083948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:43,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:43,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109083949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:43,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:43,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109083962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:43,976 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:43,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109083963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/aa6c19eb1797426aa118131df837fdc4 is 50, key is test_row_0/B:col10/1732109022646/Put/seqid=0 2024-11-20T13:23:44,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:44,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109084055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741900_1076 (size=12301) 2024-11-20T13:23:44,093 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/aa6c19eb1797426aa118131df837fdc4 2024-11-20T13:23:44,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:44,095 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:44,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109084081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109084081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:44,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109084082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:44,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109084085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/3d9e3b74336c4e2b80e9958d8df7e585 is 50, key is test_row_0/C:col10/1732109022646/Put/seqid=0 2024-11-20T13:23:44,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741901_1077 (size=12301) 2024-11-20T13:23:44,220 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/3d9e3b74336c4e2b80e9958d8df7e585 2024-11-20T13:23:44,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/99baa019e97f493a95526566764de8d1 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/99baa019e97f493a95526566764de8d1 2024-11-20T13:23:44,259 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/99baa019e97f493a95526566764de8d1, entries=150, sequenceid=299, filesize=12.0 K 2024-11-20T13:23:44,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/aa6c19eb1797426aa118131df837fdc4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/aa6c19eb1797426aa118131df837fdc4 2024-11-20T13:23:44,277 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 
{event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/aa6c19eb1797426aa118131df837fdc4, entries=150, sequenceid=299, filesize=12.0 K 2024-11-20T13:23:44,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/3d9e3b74336c4e2b80e9958d8df7e585 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/3d9e3b74336c4e2b80e9958d8df7e585 2024-11-20T13:23:44,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:44,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109084287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,296 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/3d9e3b74336c4e2b80e9958d8df7e585, entries=150, sequenceid=299, filesize=12.0 K 2024-11-20T13:23:44,298 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for cbbdc72320da06253b5398d0c51c77ae in 817ms, sequenceid=299, compaction requested=true 2024-11-20T13:23:44,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:44,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:44,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-20T13:23:44,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-20T13:23:44,308 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-20T13:23:44,308 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6100 sec 2024-11-20T13:23:44,311 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.6220 sec 2024-11-20T13:23:44,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:44,316 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T13:23:44,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:44,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:44,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:44,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:44,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:44,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:44,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/8c418cf97eaf4d3eab3c2e519c5744d7 is 50, key is test_row_0/A:col10/1732109023949/Put/seqid=0 2024-11-20T13:23:44,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:44,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109084365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,378 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:44,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109084370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,380 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:44,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109084376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741902_1078 (size=14741) 2024-11-20T13:23:44,383 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/8c418cf97eaf4d3eab3c2e519c5744d7 2024-11-20T13:23:44,383 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:44,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109084377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,427 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/c30a8b2034ee4439bbb8eafb9ba58b5f is 50, key is test_row_0/B:col10/1732109023949/Put/seqid=0 2024-11-20T13:23:44,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:44,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109084479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,489 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:44,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109084482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:44,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109084493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:44,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109084493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741903_1079 (size=12301) 2024-11-20T13:23:44,512 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/c30a8b2034ee4439bbb8eafb9ba58b5f 2024-11-20T13:23:44,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/8639171a3c44490a85446780405190df is 50, key is test_row_0/C:col10/1732109023949/Put/seqid=0 2024-11-20T13:23:44,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:44,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109084602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741904_1080 (size=12301) 2024-11-20T13:23:44,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:44,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109084693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:44,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109084693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:44,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109084708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:44,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109084711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T13:23:44,811 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-20T13:23:44,813 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:23:44,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-20T13:23:44,824 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:23:44,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T13:23:44,825 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:23:44,825 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:23:44,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=24 2024-11-20T13:23:44,977 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:44,979 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T13:23:44,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:44,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:44,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:44,980 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:44,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:44,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:45,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:45,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109085002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:45,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:45,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109085011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:45,029 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:45,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109085023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:45,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:45,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109085023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:45,041 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/8639171a3c44490a85446780405190df 2024-11-20T13:23:45,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/8c418cf97eaf4d3eab3c2e519c5744d7 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/8c418cf97eaf4d3eab3c2e519c5744d7 2024-11-20T13:23:45,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:45,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109085116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:45,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T13:23:45,132 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:45,133 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T13:23:45,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:45,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:45,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:45,133 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:45,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:45,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:45,166 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/8c418cf97eaf4d3eab3c2e519c5744d7, entries=200, sequenceid=320, filesize=14.4 K 2024-11-20T13:23:45,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/c30a8b2034ee4439bbb8eafb9ba58b5f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c30a8b2034ee4439bbb8eafb9ba58b5f 2024-11-20T13:23:45,185 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c30a8b2034ee4439bbb8eafb9ba58b5f, entries=150, sequenceid=320, filesize=12.0 K 2024-11-20T13:23:45,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/8639171a3c44490a85446780405190df as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/8639171a3c44490a85446780405190df 2024-11-20T13:23:45,208 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/8639171a3c44490a85446780405190df, entries=150, sequenceid=320, filesize=12.0 K 2024-11-20T13:23:45,216 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for cbbdc72320da06253b5398d0c51c77ae in 900ms, sequenceid=320, compaction requested=true 2024-11-20T13:23:45,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:45,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:23:45,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:45,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:23:45,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T13:23:45,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
cbbdc72320da06253b5398d0c51c77ae:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:23:45,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T13:23:45,220 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:23:45,220 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:23:45,229 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49718 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:23:45,229 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/C is initiating minor compaction (all files) 2024-11-20T13:23:45,229 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/C in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:45,229 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/e51cbb68c27c4c56b0873925bf4026f8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/8c3832b7ea2f4f9d8031174e55c48c99, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/3d9e3b74336c4e2b80e9958d8df7e585, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/8639171a3c44490a85446780405190df] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=48.6 K 2024-11-20T13:23:45,232 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting e51cbb68c27c4c56b0873925bf4026f8, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732109021789 2024-11-20T13:23:45,233 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c3832b7ea2f4f9d8031174e55c48c99, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732109022214 2024-11-20T13:23:45,233 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d9e3b74336c4e2b80e9958d8df7e585, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1732109022617 2024-11-20T13:23:45,234 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 8639171a3c44490a85446780405190df, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1732109023917 2024-11-20T13:23:45,236 DEBUG 
[RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54598 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:23:45,236 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/A is initiating minor compaction (all files) 2024-11-20T13:23:45,236 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/A in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:45,236 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/2d3fe302fa4b475797cd4cb196e18a3c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/bf999d7263b84a7bbf8d8ac33872bd9a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/99baa019e97f493a95526566764de8d1, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/8c418cf97eaf4d3eab3c2e519c5744d7] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=53.3 K 2024-11-20T13:23:45,240 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d3fe302fa4b475797cd4cb196e18a3c, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732109021789 2024-11-20T13:23:45,244 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting bf999d7263b84a7bbf8d8ac33872bd9a, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732109022214 2024-11-20T13:23:45,245 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99baa019e97f493a95526566764de8d1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1732109022617 2024-11-20T13:23:45,247 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c418cf97eaf4d3eab3c2e519c5744d7, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1732109023917 2024-11-20T13:23:45,281 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#C#compaction#66 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:45,282 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/5d8e40db79cb42f3a0ec905e41461032 is 50, key is test_row_0/C:col10/1732109023949/Put/seqid=0 2024-11-20T13:23:45,287 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:45,288 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T13:23:45,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:45,289 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T13:23:45,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:45,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:45,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:45,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:45,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:45,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:45,310 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#A#compaction#67 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:45,312 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/df0f063fe9c4441ca62024472c644ae9 is 50, key is test_row_0/A:col10/1732109023949/Put/seqid=0 2024-11-20T13:23:45,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/1a1b12b09d814636bae260e1f0110f26 is 50, key is test_row_0/A:col10/1732109024375/Put/seqid=0 2024-11-20T13:23:45,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741905_1081 (size=13051) 2024-11-20T13:23:45,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T13:23:45,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741906_1082 (size=13051) 2024-11-20T13:23:45,458 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/5d8e40db79cb42f3a0ec905e41461032 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/5d8e40db79cb42f3a0ec905e41461032 2024-11-20T13:23:45,470 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/df0f063fe9c4441ca62024472c644ae9 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/df0f063fe9c4441ca62024472c644ae9 2024-11-20T13:23:45,475 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/C of cbbdc72320da06253b5398d0c51c77ae into 5d8e40db79cb42f3a0ec905e41461032(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
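The compaction-selection records above ("4 store files, 0 compacting, 4 eligible, 16 blocking", ExploringCompactionPolicy choosing all four files) and the earlier memstore-limit rejections are governed by a handful of standard region-server settings. The sketch below lists the relevant configuration keys with their usual default values; the values are illustrative and the actual configuration of this test run is not part of this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndCompactionTuningSketch {
        public static Configuration tunedConf() {
            Configuration conf = HBaseConfiguration.create();
            // Memstore flush size; the blocking limit is this value times the block multiplier.
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            // Minor compaction file-count bounds used by the exploring compaction policy.
            conf.setInt("hbase.hstore.compaction.min", 3);
            conf.setInt("hbase.hstore.compaction.max", 10);
            // Store-file count at which further flushes are blocked ("16 blocking" above).
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
            return conf;
        }
    }
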
2024-11-20T13:23:45,475 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:45,475 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/C, priority=12, startTime=1732109025217; duration=0sec 2024-11-20T13:23:45,475 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:45,475 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:C 2024-11-20T13:23:45,476 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:23:45,481 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49718 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:23:45,481 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/B is initiating minor compaction (all files) 2024-11-20T13:23:45,482 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/B in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:45,482 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/49876362d5a443059d015428fb5962ce, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/6562944edfea41c6905b85be6f10f762, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/aa6c19eb1797426aa118131df837fdc4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c30a8b2034ee4439bbb8eafb9ba58b5f] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=48.6 K 2024-11-20T13:23:45,483 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 49876362d5a443059d015428fb5962ce, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732109021789 2024-11-20T13:23:45,483 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 6562944edfea41c6905b85be6f10f762, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732109022214 2024-11-20T13:23:45,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741907_1083 (size=12301) 2024-11-20T13:23:45,484 DEBUG 
[RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting aa6c19eb1797426aa118131df837fdc4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1732109022617 2024-11-20T13:23:45,484 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/A of cbbdc72320da06253b5398d0c51c77ae into df0f063fe9c4441ca62024472c644ae9(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:23:45,484 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/1a1b12b09d814636bae260e1f0110f26 2024-11-20T13:23:45,484 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:45,485 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/A, priority=12, startTime=1732109025216; duration=0sec 2024-11-20T13:23:45,485 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting c30a8b2034ee4439bbb8eafb9ba58b5f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1732109023917 2024-11-20T13:23:45,485 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:45,485 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:A 2024-11-20T13:23:45,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/52da2a13cff54cd8a085469772e10d0d is 50, key is test_row_0/B:col10/1732109024375/Put/seqid=0 2024-11-20T13:23:45,526 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#B#compaction#70 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:45,527 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/d5bffbe6eac14c20a7a552c1d0ad9532 is 50, key is test_row_0/B:col10/1732109023949/Put/seqid=0 2024-11-20T13:23:45,538 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
as already flushing 2024-11-20T13:23:45,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:45,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741908_1084 (size=12301) 2024-11-20T13:23:45,564 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/52da2a13cff54cd8a085469772e10d0d 2024-11-20T13:23:45,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741909_1085 (size=13051) 2024-11-20T13:23:45,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/34dbae8a39e24b5e9c11d48af22897ee is 50, key is test_row_0/C:col10/1732109024375/Put/seqid=0 2024-11-20T13:23:45,589 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/d5bffbe6eac14c20a7a552c1d0ad9532 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d5bffbe6eac14c20a7a552c1d0ad9532 2024-11-20T13:23:45,617 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/B of cbbdc72320da06253b5398d0c51c77ae into d5bffbe6eac14c20a7a552c1d0ad9532(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:23:45,617 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:45,617 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/B, priority=12, startTime=1732109025217; duration=0sec 2024-11-20T13:23:45,617 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:45,617 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:B 2024-11-20T13:23:45,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:45,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109085619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:45,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:45,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109085621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:45,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741910_1086 (size=12301) 2024-11-20T13:23:45,640 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:45,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109085631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:45,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:45,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109085635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:45,643 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/34dbae8a39e24b5e9c11d48af22897ee 2024-11-20T13:23:45,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/1a1b12b09d814636bae260e1f0110f26 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/1a1b12b09d814636bae260e1f0110f26 2024-11-20T13:23:45,659 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/1a1b12b09d814636bae260e1f0110f26, entries=150, sequenceid=335, filesize=12.0 K 2024-11-20T13:23:45,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/52da2a13cff54cd8a085469772e10d0d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/52da2a13cff54cd8a085469772e10d0d 2024-11-20T13:23:45,682 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/52da2a13cff54cd8a085469772e10d0d, entries=150, sequenceid=335, filesize=12.0 K 2024-11-20T13:23:45,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/34dbae8a39e24b5e9c11d48af22897ee as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/34dbae8a39e24b5e9c11d48af22897ee 2024-11-20T13:23:45,716 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/34dbae8a39e24b5e9c11d48af22897ee, entries=150, sequenceid=335, filesize=12.0 K 2024-11-20T13:23:45,718 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for cbbdc72320da06253b5398d0c51c77ae in 429ms, sequenceid=335, compaction requested=false 2024-11-20T13:23:45,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:45,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
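The flush just completed here is driven by the master-side FlushTableProcedure/FlushRegionProcedure pair (pid=24/25 in the surrounding records). As a point of reference, an equivalent table flush can be requested from a client through the Admin API; this is a minimal sketch under the assumption of a standard client connection, not a reproduction of how this particular test triggers its flushes.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Asks the master to run a flush procedure over every region of the table,
                // analogous to the FlushTableProcedure records seen in this log.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }
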
2024-11-20T13:23:45,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-20T13:23:45,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-20T13:23:45,728 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-20T13:23:45,728 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 896 msec 2024-11-20T13:23:45,731 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 915 msec 2024-11-20T13:23:45,752 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T13:23:45,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:45,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:45,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:45,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:45,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:45,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:45,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:45,801 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/42add19b932549fd9f769eb1cb3ed754 is 50, key is test_row_0/A:col10/1732109025620/Put/seqid=0 2024-11-20T13:23:45,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:45,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109085798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:45,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:45,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109085804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:45,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:45,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109085817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:45,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:45,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109085817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:45,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741911_1087 (size=14741) 2024-11-20T13:23:45,851 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/42add19b932549fd9f769eb1cb3ed754 2024-11-20T13:23:45,883 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/81fd7e099ad6418a82122739924dcc7f is 50, key is test_row_0/B:col10/1732109025620/Put/seqid=0 2024-11-20T13:23:45,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T13:23:45,932 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-20T13:23:45,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:45,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109085926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:45,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:45,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109085926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:45,934 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:23:45,935 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:45,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741912_1088 (size=12301) 2024-11-20T13:23:45,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-20T13:23:45,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109085926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:45,939 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/81fd7e099ad6418a82122739924dcc7f 2024-11-20T13:23:45,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T13:23:45,943 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:23:45,945 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:23:45,945 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:23:45,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:45,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109085929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:45,991 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/21fd71dc20ee4861a6135acfdaae15d3 is 50, key is test_row_0/C:col10/1732109025620/Put/seqid=0 2024-11-20T13:23:46,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741913_1089 (size=12301) 2024-11-20T13:23:46,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T13:23:46,046 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/21fd71dc20ee4861a6135acfdaae15d3 2024-11-20T13:23:46,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/42add19b932549fd9f769eb1cb3ed754 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/42add19b932549fd9f769eb1cb3ed754 2024-11-20T13:23:46,079 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/42add19b932549fd9f769eb1cb3ed754, entries=200, sequenceid=360, filesize=14.4 K 2024-11-20T13:23:46,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/81fd7e099ad6418a82122739924dcc7f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/81fd7e099ad6418a82122739924dcc7f 2024-11-20T13:23:46,097 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,098 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T13:23:46,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:46,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:46,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:46,099 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:46,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:46,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:46,111 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/81fd7e099ad6418a82122739924dcc7f, entries=150, sequenceid=360, filesize=12.0 K 2024-11-20T13:23:46,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/21fd71dc20ee4861a6135acfdaae15d3 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/21fd71dc20ee4861a6135acfdaae15d3 2024-11-20T13:23:46,123 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/21fd71dc20ee4861a6135acfdaae15d3, entries=150, sequenceid=360, filesize=12.0 K 2024-11-20T13:23:46,124 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for cbbdc72320da06253b5398d0c51c77ae in 372ms, sequenceid=360, compaction requested=true 2024-11-20T13:23:46,124 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:46,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:23:46,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:46,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:23:46,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:46,124 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:46,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:23:46,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T13:23:46,125 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:46,128 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:46,128 DEBUG 
[RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40093 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:46,128 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/B is initiating minor compaction (all files) 2024-11-20T13:23:46,128 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/A is initiating minor compaction (all files) 2024-11-20T13:23:46,128 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/B in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:46,128 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/A in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:46,128 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d5bffbe6eac14c20a7a552c1d0ad9532, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/52da2a13cff54cd8a085469772e10d0d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/81fd7e099ad6418a82122739924dcc7f] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=36.8 K 2024-11-20T13:23:46,128 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/df0f063fe9c4441ca62024472c644ae9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/1a1b12b09d814636bae260e1f0110f26, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/42add19b932549fd9f769eb1cb3ed754] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=39.2 K 2024-11-20T13:23:46,129 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting d5bffbe6eac14c20a7a552c1d0ad9532, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1732109023917 2024-11-20T13:23:46,130 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting df0f063fe9c4441ca62024472c644ae9, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1732109023917 2024-11-20T13:23:46,130 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52da2a13cff54cd8a085469772e10d0d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, 
seqNum=335, earliestPutTs=1732109024355 2024-11-20T13:23:46,130 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a1b12b09d814636bae260e1f0110f26, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732109024355 2024-11-20T13:23:46,130 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 81fd7e099ad6418a82122739924dcc7f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732109025620 2024-11-20T13:23:46,131 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 42add19b932549fd9f769eb1cb3ed754, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732109025585 2024-11-20T13:23:46,166 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#B#compaction#75 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:46,167 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/debb4e25e0d44b2a89377bb7862024a2 is 50, key is test_row_0/B:col10/1732109025620/Put/seqid=0 2024-11-20T13:23:46,194 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#A#compaction#76 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:46,195 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/5a77b888c0cc427ead866490a11d098f is 50, key is test_row_0/A:col10/1732109025620/Put/seqid=0 2024-11-20T13:23:46,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:46,215 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T13:23:46,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:46,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:46,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:46,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:46,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:46,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:46,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T13:23:46,258 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/033e3050b01b48d197c0ca3a9d65d462 is 50, key is test_row_0/A:col10/1732109025813/Put/seqid=0 2024-11-20T13:23:46,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741914_1090 (size=13153) 2024-11-20T13:23:46,268 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T13:23:46,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:46,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:46,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:46,269 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:46,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:46,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:46,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741915_1091 (size=13153) 2024-11-20T13:23:46,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:46,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109086278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:46,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109086285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:46,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109086294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:46,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109086294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:46,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109086295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741916_1092 (size=17181) 2024-11-20T13:23:46,333 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/debb4e25e0d44b2a89377bb7862024a2 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/debb4e25e0d44b2a89377bb7862024a2 2024-11-20T13:23:46,335 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=378 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/033e3050b01b48d197c0ca3a9d65d462 2024-11-20T13:23:46,340 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/5a77b888c0cc427ead866490a11d098f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/5a77b888c0cc427ead866490a11d098f 2024-11-20T13:23:46,346 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/B of cbbdc72320da06253b5398d0c51c77ae into debb4e25e0d44b2a89377bb7862024a2(size=12.8 K), total size for store is 12.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:23:46,346 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:46,346 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/B, priority=13, startTime=1732109026124; duration=0sec 2024-11-20T13:23:46,346 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:46,346 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:B 2024-11-20T13:23:46,346 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:46,351 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/A of cbbdc72320da06253b5398d0c51c77ae into 5a77b888c0cc427ead866490a11d098f(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:23:46,351 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:46,351 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/A, priority=13, startTime=1732109026124; duration=0sec 2024-11-20T13:23:46,351 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:46,351 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:A 2024-11-20T13:23:46,352 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:46,352 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/C is initiating minor compaction (all files) 2024-11-20T13:23:46,353 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/C in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:46,353 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/5d8e40db79cb42f3a0ec905e41461032, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/34dbae8a39e24b5e9c11d48af22897ee, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/21fd71dc20ee4861a6135acfdaae15d3] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=36.8 K 2024-11-20T13:23:46,353 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d8e40db79cb42f3a0ec905e41461032, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1732109023917 2024-11-20T13:23:46,356 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34dbae8a39e24b5e9c11d48af22897ee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732109024355 2024-11-20T13:23:46,357 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 21fd71dc20ee4861a6135acfdaae15d3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732109025620 2024-11-20T13:23:46,375 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/2e6f817a7be2465293e34cd1216a6949 is 50, key is test_row_0/B:col10/1732109025813/Put/seqid=0 2024-11-20T13:23:46,379 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#C#compaction#79 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:46,380 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/7dea75d1ab0e4728958680521c2c609e is 50, key is test_row_0/C:col10/1732109025620/Put/seqid=0 2024-11-20T13:23:46,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:46,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109086398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:46,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109086401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:46,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109086401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741917_1093 (size=12301) 2024-11-20T13:23:46,430 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=378 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/2e6f817a7be2465293e34cd1216a6949 2024-11-20T13:23:46,432 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:46,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109086409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:46,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109086416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,437 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T13:23:46,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:46,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:46,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:46,440 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:46,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:46,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:46,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741918_1094 (size=13153) 2024-11-20T13:23:46,464 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/d95fd5cfb6534ec5b25346b703232d42 is 50, key is test_row_0/C:col10/1732109025813/Put/seqid=0 2024-11-20T13:23:46,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741919_1095 (size=12301) 2024-11-20T13:23:46,499 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=378 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/d95fd5cfb6534ec5b25346b703232d42 2024-11-20T13:23:46,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/033e3050b01b48d197c0ca3a9d65d462 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/033e3050b01b48d197c0ca3a9d65d462 2024-11-20T13:23:46,529 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/033e3050b01b48d197c0ca3a9d65d462, entries=250, sequenceid=378, filesize=16.8 K 2024-11-20T13:23:46,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/2e6f817a7be2465293e34cd1216a6949 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/2e6f817a7be2465293e34cd1216a6949 2024-11-20T13:23:46,543 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/2e6f817a7be2465293e34cd1216a6949, entries=150, sequenceid=378, filesize=12.0 K 2024-11-20T13:23:46,547 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/d95fd5cfb6534ec5b25346b703232d42 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/d95fd5cfb6534ec5b25346b703232d42 2024-11-20T13:23:46,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T13:23:46,565 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/d95fd5cfb6534ec5b25346b703232d42, entries=150, sequenceid=378, filesize=12.0 K 2024-11-20T13:23:46,573 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for cbbdc72320da06253b5398d0c51c77ae in 358ms, sequenceid=378, compaction requested=false 2024-11-20T13:23:46,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:46,600 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T13:23:46,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:46,601 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T13:23:46,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:46,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:46,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:46,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:46,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:46,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:46,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:46,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
as already flushing 2024-11-20T13:23:46,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/2ba0bd4f0ec2496eba499e51e08a3310 is 50, key is test_row_0/A:col10/1732109026289/Put/seqid=0 2024-11-20T13:23:46,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741920_1096 (size=12301) 2024-11-20T13:23:46,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:46,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109086673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:46,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109086673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:46,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109086682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,692 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/2ba0bd4f0ec2496eba499e51e08a3310 2024-11-20T13:23:46,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:46,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109086684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:46,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109086685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/391f23a9c8d345d89b70028938741350 is 50, key is test_row_0/B:col10/1732109026289/Put/seqid=0 2024-11-20T13:23:46,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741921_1097 (size=12301) 2024-11-20T13:23:46,784 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/391f23a9c8d345d89b70028938741350 2024-11-20T13:23:46,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:46,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109086792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:46,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109086793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:46,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109086794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/2508f000121a4a80ab2ecb7f0d137a9c is 50, key is test_row_0/C:col10/1732109026289/Put/seqid=0 2024-11-20T13:23:46,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:46,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109086802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,822 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:46,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109086820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:46,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741922_1098 (size=12301) 2024-11-20T13:23:46,860 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/7dea75d1ab0e4728958680521c2c609e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/7dea75d1ab0e4728958680521c2c609e 2024-11-20T13:23:46,868 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/C of cbbdc72320da06253b5398d0c51c77ae into 7dea75d1ab0e4728958680521c2c609e(size=12.8 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:23:46,868 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:46,868 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/C, priority=13, startTime=1732109026124; duration=0sec 2024-11-20T13:23:46,868 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:46,868 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:C 2024-11-20T13:23:47,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:47,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109087010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:47,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:47,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109087010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:47,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:47,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109087011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:47,029 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:47,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109087012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:47,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:47,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109087025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:47,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T13:23:47,261 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/2508f000121a4a80ab2ecb7f0d137a9c 2024-11-20T13:23:47,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/2ba0bd4f0ec2496eba499e51e08a3310 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/2ba0bd4f0ec2496eba499e51e08a3310 2024-11-20T13:23:47,281 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/2ba0bd4f0ec2496eba499e51e08a3310, entries=150, sequenceid=398, filesize=12.0 K 2024-11-20T13:23:47,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/391f23a9c8d345d89b70028938741350 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/391f23a9c8d345d89b70028938741350 2024-11-20T13:23:47,297 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/391f23a9c8d345d89b70028938741350, entries=150, sequenceid=398, filesize=12.0 K 2024-11-20T13:23:47,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/2508f000121a4a80ab2ecb7f0d137a9c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/2508f000121a4a80ab2ecb7f0d137a9c 2024-11-20T13:23:47,307 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/2508f000121a4a80ab2ecb7f0d137a9c, entries=150, sequenceid=398, filesize=12.0 K 2024-11-20T13:23:47,309 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for cbbdc72320da06253b5398d0c51c77ae in 708ms, sequenceid=398, compaction requested=true 2024-11-20T13:23:47,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:47,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:47,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-20T13:23:47,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-20T13:23:47,314 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-20T13:23:47,314 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3670 sec 2024-11-20T13:23:47,325 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.3890 sec 2024-11-20T13:23:47,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:47,332 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T13:23:47,332 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:47,332 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:47,332 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:47,332 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:47,332 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:47,332 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:47,367 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/5c8d5c4ecabd4d6790cb6ead46438fef is 50, key is test_row_0/A:col10/1732109026670/Put/seqid=0 2024-11-20T13:23:47,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:47,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109087375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:47,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:47,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109087379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:47,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:47,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109087380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:47,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:47,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109087383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:47,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:47,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109087392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:47,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741923_1099 (size=14741) 2024-11-20T13:23:47,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:47,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109087495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:47,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:47,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109087506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:47,511 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:47,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109087506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:47,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:47,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109087508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:47,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:47,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109087508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:47,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:47,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109087715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:47,726 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:47,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109087716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:47,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:47,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109087720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:47,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:47,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109087723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:47,730 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:47,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109087723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:47,833 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/5c8d5c4ecabd4d6790cb6ead46438fef 2024-11-20T13:23:47,884 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/b03e0df17ace4e559b1bd823b2912e9b is 50, key is test_row_0/B:col10/1732109026670/Put/seqid=0 2024-11-20T13:23:47,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741924_1100 (size=12301) 2024-11-20T13:23:48,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:48,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109088030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:48,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:48,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109088033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:48,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:48,043 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:48,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109088031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:48,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109088034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:48,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:48,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109088043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:48,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T13:23:48,060 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-20T13:23:48,063 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:23:48,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-20T13:23:48,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T13:23:48,168 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:23:48,173 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:23:48,173 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:23:48,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=28 2024-11-20T13:23:48,336 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:48,340 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T13:23:48,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:48,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:48,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:48,341 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:48,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:48,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:48,373 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/b03e0df17ace4e559b1bd823b2912e9b 2024-11-20T13:23:48,436 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/36b9efd59b654e72bde0c383b2c4ffc4 is 50, key is test_row_0/C:col10/1732109026670/Put/seqid=0 2024-11-20T13:23:48,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T13:23:48,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741925_1101 (size=12301) 2024-11-20T13:23:48,497 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/36b9efd59b654e72bde0c383b2c4ffc4 2024-11-20T13:23:48,512 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:48,513 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T13:23:48,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:48,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:48,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:48,516 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:48,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:48,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:48,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/5c8d5c4ecabd4d6790cb6ead46438fef as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/5c8d5c4ecabd4d6790cb6ead46438fef 2024-11-20T13:23:48,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:48,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109088546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:48,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:48,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109088551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:48,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:48,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109088556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:48,574 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:48,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109088560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:48,569 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/5c8d5c4ecabd4d6790cb6ead46438fef, entries=200, sequenceid=419, filesize=14.4 K 2024-11-20T13:23:48,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/b03e0df17ace4e559b1bd823b2912e9b as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/b03e0df17ace4e559b1bd823b2912e9b 2024-11-20T13:23:48,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:48,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109088564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:48,596 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/b03e0df17ace4e559b1bd823b2912e9b, entries=150, sequenceid=419, filesize=12.0 K 2024-11-20T13:23:48,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/36b9efd59b654e72bde0c383b2c4ffc4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/36b9efd59b654e72bde0c383b2c4ffc4 2024-11-20T13:23:48,622 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/36b9efd59b654e72bde0c383b2c4ffc4, entries=150, sequenceid=419, filesize=12.0 K 2024-11-20T13:23:48,640 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for cbbdc72320da06253b5398d0c51c77ae in 1309ms, sequenceid=419, compaction requested=true 2024-11-20T13:23:48,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:48,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:23:48,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:48,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:23:48,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:48,641 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 
16 blocking 2024-11-20T13:23:48,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:23:48,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T13:23:48,644 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:23:48,648 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 57376 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:23:48,649 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/A is initiating minor compaction (all files) 2024-11-20T13:23:48,649 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/A in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:48,649 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/5a77b888c0cc427ead866490a11d098f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/033e3050b01b48d197c0ca3a9d65d462, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/2ba0bd4f0ec2496eba499e51e08a3310, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/5c8d5c4ecabd4d6790cb6ead46438fef] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=56.0 K 2024-11-20T13:23:48,650 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:23:48,650 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/B is initiating minor compaction (all files) 2024-11-20T13:23:48,650 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/B in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:48,650 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/debb4e25e0d44b2a89377bb7862024a2, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/2e6f817a7be2465293e34cd1216a6949, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/391f23a9c8d345d89b70028938741350, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/b03e0df17ace4e559b1bd823b2912e9b] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=48.9 K 2024-11-20T13:23:48,650 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a77b888c0cc427ead866490a11d098f, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732109025620 2024-11-20T13:23:48,651 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting debb4e25e0d44b2a89377bb7862024a2, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732109025620 2024-11-20T13:23:48,652 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 033e3050b01b48d197c0ca3a9d65d462, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=378, earliestPutTs=1732109025790 2024-11-20T13:23:48,652 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e6f817a7be2465293e34cd1216a6949, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=378, earliestPutTs=1732109025790 2024-11-20T13:23:48,652 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ba0bd4f0ec2496eba499e51e08a3310, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1732109026283 2024-11-20T13:23:48,653 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c8d5c4ecabd4d6790cb6ead46438fef, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1732109026670 2024-11-20T13:23:48,653 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 391f23a9c8d345d89b70028938741350, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1732109026283 2024-11-20T13:23:48,654 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting b03e0df17ace4e559b1bd823b2912e9b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1732109026670 2024-11-20T13:23:48,671 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#B#compaction#87 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:48,672 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/11daa8fe0e604702b843c09499cbccbc is 50, key is test_row_0/B:col10/1732109026670/Put/seqid=0 2024-11-20T13:23:48,676 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:48,677 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T13:23:48,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:48,680 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T13:23:48,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:48,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:48,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:48,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:48,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:48,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:48,691 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#A#compaction#88 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:48,691 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/196be426d6cd48f7a3052408c6f86bc4 is 50, key is test_row_0/A:col10/1732109026670/Put/seqid=0 2024-11-20T13:23:48,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/c0d73c25b43d4fcd98bb87f0e6d7e463 is 50, key is test_row_0/A:col10/1732109027380/Put/seqid=0 2024-11-20T13:23:48,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741926_1102 (size=13289) 2024-11-20T13:23:48,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T13:23:48,812 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/11daa8fe0e604702b843c09499cbccbc as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/11daa8fe0e604702b843c09499cbccbc 2024-11-20T13:23:48,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741928_1104 (size=12301) 2024-11-20T13:23:48,825 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/c0d73c25b43d4fcd98bb87f0e6d7e463 2024-11-20T13:23:48,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741927_1103 (size=13289) 2024-11-20T13:23:48,852 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/B of cbbdc72320da06253b5398d0c51c77ae into 11daa8fe0e604702b843c09499cbccbc(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:23:48,852 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:48,852 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/B, priority=12, startTime=1732109028640; duration=0sec 2024-11-20T13:23:48,852 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:48,853 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:B 2024-11-20T13:23:48,854 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:23:48,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/f4e5d492f7114e4fa0d3ef5d6481e124 is 50, key is test_row_0/B:col10/1732109027380/Put/seqid=0 2024-11-20T13:23:48,870 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:23:48,870 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/C is initiating minor compaction (all files) 2024-11-20T13:23:48,870 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/C in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:48,870 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/7dea75d1ab0e4728958680521c2c609e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/d95fd5cfb6534ec5b25346b703232d42, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/2508f000121a4a80ab2ecb7f0d137a9c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/36b9efd59b654e72bde0c383b2c4ffc4] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=48.9 K 2024-11-20T13:23:48,873 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7dea75d1ab0e4728958680521c2c609e, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732109025620 2024-11-20T13:23:48,878 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting d95fd5cfb6534ec5b25346b703232d42, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=378, earliestPutTs=1732109025790 2024-11-20T13:23:48,881 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/196be426d6cd48f7a3052408c6f86bc4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/196be426d6cd48f7a3052408c6f86bc4 2024-11-20T13:23:48,881 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2508f000121a4a80ab2ecb7f0d137a9c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1732109026283 2024-11-20T13:23:48,883 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 36b9efd59b654e72bde0c383b2c4ffc4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1732109026670 2024-11-20T13:23:48,905 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/A of cbbdc72320da06253b5398d0c51c77ae into 196be426d6cd48f7a3052408c6f86bc4(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:23:48,905 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:48,905 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/A, priority=12, startTime=1732109028640; duration=0sec 2024-11-20T13:23:48,905 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:48,905 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:A 2024-11-20T13:23:48,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741929_1105 (size=12301) 2024-11-20T13:23:48,957 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/f4e5d492f7114e4fa0d3ef5d6481e124 2024-11-20T13:23:48,971 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#C#compaction#91 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:48,977 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/2d794ae4ba67451a815e931f31ef6dbe is 50, key is test_row_0/C:col10/1732109026670/Put/seqid=0 2024-11-20T13:23:48,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/1da3caca4c8a419da2308d485e505cb0 is 50, key is test_row_0/C:col10/1732109027380/Put/seqid=0 2024-11-20T13:23:49,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741930_1106 (size=13289) 2024-11-20T13:23:49,045 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/2d794ae4ba67451a815e931f31ef6dbe as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/2d794ae4ba67451a815e931f31ef6dbe 2024-11-20T13:23:49,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741931_1107 (size=12301) 2024-11-20T13:23:49,057 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/C of cbbdc72320da06253b5398d0c51c77ae into 2d794ae4ba67451a815e931f31ef6dbe(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:23:49,058 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:49,058 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/C, priority=12, startTime=1732109028641; duration=0sec 2024-11-20T13:23:49,058 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:49,058 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:C 2024-11-20T13:23:49,059 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/1da3caca4c8a419da2308d485e505cb0 2024-11-20T13:23:49,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/c0d73c25b43d4fcd98bb87f0e6d7e463 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/c0d73c25b43d4fcd98bb87f0e6d7e463 2024-11-20T13:23:49,086 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/c0d73c25b43d4fcd98bb87f0e6d7e463, entries=150, sequenceid=435, filesize=12.0 K 2024-11-20T13:23:49,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/f4e5d492f7114e4fa0d3ef5d6481e124 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/f4e5d492f7114e4fa0d3ef5d6481e124 2024-11-20T13:23:49,094 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/f4e5d492f7114e4fa0d3ef5d6481e124, entries=150, sequenceid=435, filesize=12.0 K 2024-11-20T13:23:49,096 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/1da3caca4c8a419da2308d485e505cb0 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/1da3caca4c8a419da2308d485e505cb0 2024-11-20T13:23:49,105 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/1da3caca4c8a419da2308d485e505cb0, entries=150, sequenceid=435, filesize=12.0 K 2024-11-20T13:23:49,107 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=0 B/0 for cbbdc72320da06253b5398d0c51c77ae in 426ms, sequenceid=435, compaction requested=false 2024-11-20T13:23:49,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:49,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:49,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-20T13:23:49,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-20T13:23:49,115 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-20T13:23:49,115 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 937 msec 2024-11-20T13:23:49,118 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.0530 sec 2024-11-20T13:23:49,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T13:23:49,277 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-20T13:23:49,280 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:23:49,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-20T13:23:49,282 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:23:49,283 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:23:49,283 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:23:49,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T13:23:49,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T13:23:49,440 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:49,441 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T13:23:49,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:49,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:49,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:49,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-20T13:23:49,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-11-20T13:23:49,450 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-20T13:23:49,451 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 164 msec 2024-11-20T13:23:49,454 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 171 msec 2024-11-20T13:23:49,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T13:23:49,589 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-20T13:23:49,594 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:23:49,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees 2024-11-20T13:23:49,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T13:23:49,597 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:23:49,598 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:23:49,598 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:23:49,613 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T13:23:49,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:49,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:49,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:49,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:49,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-20T13:23:49,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:49,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:49,622 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/237745a12c57462993ed7c70a9169e94 is 50, key is test_row_0/A:col10/1732109029611/Put/seqid=0 2024-11-20T13:23:49,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741932_1108 (size=19621) 2024-11-20T13:23:49,691 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/237745a12c57462993ed7c70a9169e94 2024-11-20T13:23:49,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T13:23:49,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:49,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109089688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:49,710 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/260f53bb14d74ef3a3ec7acca0de1088 is 50, key is test_row_0/B:col10/1732109029611/Put/seqid=0 2024-11-20T13:23:49,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:49,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109089694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:49,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:49,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109089706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:49,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:49,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109089708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:49,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:49,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109089711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:49,751 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:49,752 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T13:23:49,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:49,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:49,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:49,752 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:49,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:49,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:49,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741933_1109 (size=12301) 2024-11-20T13:23:49,773 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/260f53bb14d74ef3a3ec7acca0de1088 2024-11-20T13:23:49,798 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/4ae8a61abc574f56bb0a20d1d2d2d982 is 50, key is test_row_0/C:col10/1732109029611/Put/seqid=0 2024-11-20T13:23:49,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:49,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109089812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:49,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:49,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109089813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:49,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:49,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109089820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:49,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:49,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109089821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:49,829 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:49,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109089823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:49,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741934_1110 (size=12301) 2024-11-20T13:23:49,847 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/4ae8a61abc574f56bb0a20d1d2d2d982 2024-11-20T13:23:49,854 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/237745a12c57462993ed7c70a9169e94 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/237745a12c57462993ed7c70a9169e94 2024-11-20T13:23:49,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/237745a12c57462993ed7c70a9169e94, entries=300, sequenceid=451, filesize=19.2 K 2024-11-20T13:23:49,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/260f53bb14d74ef3a3ec7acca0de1088 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/260f53bb14d74ef3a3ec7acca0de1088 2024-11-20T13:23:49,869 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/260f53bb14d74ef3a3ec7acca0de1088, entries=150, sequenceid=451, filesize=12.0 K 2024-11-20T13:23:49,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/4ae8a61abc574f56bb0a20d1d2d2d982 as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/4ae8a61abc574f56bb0a20d1d2d2d982 2024-11-20T13:23:49,891 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/4ae8a61abc574f56bb0a20d1d2d2d982, entries=150, sequenceid=451, filesize=12.0 K 2024-11-20T13:23:49,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T13:23:49,908 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for cbbdc72320da06253b5398d0c51c77ae in 295ms, sequenceid=451, compaction requested=true 2024-11-20T13:23:49,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:49,908 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:49,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:23:49,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:49,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:23:49,910 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:49,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:49,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:23:49,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:49,911 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 45211 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:49,911 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/A is initiating minor compaction (all files) 2024-11-20T13:23:49,911 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/A in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:49,911 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/196be426d6cd48f7a3052408c6f86bc4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/c0d73c25b43d4fcd98bb87f0e6d7e463, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/237745a12c57462993ed7c70a9169e94] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=44.2 K 2024-11-20T13:23:49,912 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 196be426d6cd48f7a3052408c6f86bc4, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1732109026670 2024-11-20T13:23:49,913 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0d73c25b43d4fcd98bb87f0e6d7e463, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732109027378 2024-11-20T13:23:49,913 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:49,913 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/B is initiating minor compaction (all files) 2024-11-20T13:23:49,913 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/B in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:49,913 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/11daa8fe0e604702b843c09499cbccbc, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/f4e5d492f7114e4fa0d3ef5d6481e124, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/260f53bb14d74ef3a3ec7acca0de1088] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=37.0 K 2024-11-20T13:23:49,914 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 11daa8fe0e604702b843c09499cbccbc, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1732109026670 2024-11-20T13:23:49,914 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:49,915 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting f4e5d492f7114e4fa0d3ef5d6481e124, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732109027378 2024-11-20T13:23:49,916 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 260f53bb14d74ef3a3ec7acca0de1088, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1732109029607 2024-11-20T13:23:49,916 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 237745a12c57462993ed7c70a9169e94, keycount=300, bloomtype=ROW, size=19.2 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1732109029595 2024-11-20T13:23:49,916 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T13:23:49,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:49,916 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T13:23:49,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:49,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:49,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:49,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:49,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:49,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:49,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/d2831f81e53b496eb827cd5b4dad7860 is 50, key is test_row_0/A:col10/1732109029695/Put/seqid=0 2024-11-20T13:23:49,953 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#B#compaction#97 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:49,954 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/e7970e7c716a46acb38d6f209797a0b8 is 50, key is test_row_0/B:col10/1732109029611/Put/seqid=0 2024-11-20T13:23:49,961 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#A#compaction#98 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:49,962 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/89f5798ceea8458fa3ea3952ba99d1e1 is 50, key is test_row_0/A:col10/1732109029611/Put/seqid=0 2024-11-20T13:23:50,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741936_1112 (size=13391) 2024-11-20T13:23:50,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:50,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:50,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741935_1111 (size=12301) 2024-11-20T13:23:50,058 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/e7970e7c716a46acb38d6f209797a0b8 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/e7970e7c716a46acb38d6f209797a0b8 2024-11-20T13:23:50,063 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=476 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/d2831f81e53b496eb827cd5b4dad7860 2024-11-20T13:23:50,070 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/B of cbbdc72320da06253b5398d0c51c77ae into e7970e7c716a46acb38d6f209797a0b8(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:23:50,070 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:50,070 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/B, priority=13, startTime=1732109029910; duration=0sec 2024-11-20T13:23:50,071 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:50,071 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:B 2024-11-20T13:23:50,072 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:50,076 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:50,076 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/C is initiating minor compaction (all files) 2024-11-20T13:23:50,076 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/C in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:50,076 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/2d794ae4ba67451a815e931f31ef6dbe, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/1da3caca4c8a419da2308d485e505cb0, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/4ae8a61abc574f56bb0a20d1d2d2d982] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=37.0 K 2024-11-20T13:23:50,076 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d794ae4ba67451a815e931f31ef6dbe, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1732109026670 2024-11-20T13:23:50,077 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 1da3caca4c8a419da2308d485e505cb0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732109027378 2024-11-20T13:23:50,078 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ae8a61abc574f56bb0a20d1d2d2d982, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1732109029607 2024-11-20T13:23:50,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109090048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109090050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,099 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109090087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109090087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109090088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741937_1113 (size=13391) 2024-11-20T13:23:50,114 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/89f5798ceea8458fa3ea3952ba99d1e1 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/89f5798ceea8458fa3ea3952ba99d1e1 2024-11-20T13:23:50,121 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/A of cbbdc72320da06253b5398d0c51c77ae into 89f5798ceea8458fa3ea3952ba99d1e1(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:23:50,121 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:50,121 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/A, priority=13, startTime=1732109029908; duration=0sec 2024-11-20T13:23:50,122 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:50,122 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:A 2024-11-20T13:23:50,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/73d0e3c5f42f464ca564abef319780a3 is 50, key is test_row_0/B:col10/1732109029695/Put/seqid=0 2024-11-20T13:23:50,146 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#C#compaction#100 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:50,147 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/d2f4e4c2843a44b1b9bca6cd90e58eb7 is 50, key is test_row_0/C:col10/1732109029611/Put/seqid=0 2024-11-20T13:23:50,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T13:23:50,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741938_1114 (size=12301) 2024-11-20T13:23:50,221 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=476 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/73d0e3c5f42f464ca564abef319780a3 2024-11-20T13:23:50,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741939_1115 (size=13391) 2024-11-20T13:23:50,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109090200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109090200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109090216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109090234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,241 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109090237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/946e46f3e2074e1dac7c362aa931157d is 50, key is test_row_0/C:col10/1732109029695/Put/seqid=0 2024-11-20T13:23:50,257 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/d2f4e4c2843a44b1b9bca6cd90e58eb7 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/d2f4e4c2843a44b1b9bca6cd90e58eb7 2024-11-20T13:23:50,279 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/C of cbbdc72320da06253b5398d0c51c77ae into d2f4e4c2843a44b1b9bca6cd90e58eb7(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:23:50,279 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:50,279 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/C, priority=13, startTime=1732109029910; duration=0sec 2024-11-20T13:23:50,280 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:50,280 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:C 2024-11-20T13:23:50,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741940_1116 (size=12301) 2024-11-20T13:23:50,301 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=476 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/946e46f3e2074e1dac7c362aa931157d 2024-11-20T13:23:50,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/d2831f81e53b496eb827cd5b4dad7860 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/d2831f81e53b496eb827cd5b4dad7860 2024-11-20T13:23:50,319 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/d2831f81e53b496eb827cd5b4dad7860, entries=150, sequenceid=476, filesize=12.0 K 2024-11-20T13:23:50,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/73d0e3c5f42f464ca564abef319780a3 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/73d0e3c5f42f464ca564abef319780a3 2024-11-20T13:23:50,343 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/73d0e3c5f42f464ca564abef319780a3, entries=150, sequenceid=476, filesize=12.0 K 2024-11-20T13:23:50,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/946e46f3e2074e1dac7c362aa931157d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/946e46f3e2074e1dac7c362aa931157d 2024-11-20T13:23:50,351 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/946e46f3e2074e1dac7c362aa931157d, entries=150, sequenceid=476, filesize=12.0 K 2024-11-20T13:23:50,352 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for cbbdc72320da06253b5398d0c51c77ae in 436ms, sequenceid=476, compaction requested=false 2024-11-20T13:23:50,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:50,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:50,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-11-20T13:23:50,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-11-20T13:23:50,362 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-20T13:23:50,362 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 756 msec 2024-11-20T13:23:50,369 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 772 msec 2024-11-20T13:23:50,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:50,454 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T13:23:50,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:50,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:50,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:50,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:50,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:50,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:50,486 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/e8bbb2a1c50e49f98bea3b058184ebf6 is 50, key is test_row_0/A:col10/1732109030073/Put/seqid=0 2024-11-20T13:23:50,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741941_1117 (size=14741) 2024-11-20T13:23:50,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109090573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109090575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109090576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109090580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,608 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109090573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109090697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T13:23:50,717 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-20T13:23:50,722 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:23:50,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109090709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees 2024-11-20T13:23:50,725 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:23:50,726 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:23:50,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109090709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,726 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:23:50,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T13:23:50,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109090721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109090721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T13:23:50,879 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-20T13:23:50,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:50,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:50,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:50,885 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:50,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:50,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:50,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109090921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,933 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109090929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109090933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109090933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:50,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109090933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:50,940 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=492 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/e8bbb2a1c50e49f98bea3b058184ebf6 2024-11-20T13:23:50,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/d66a6a67aefb400687ffa3595c586b42 is 50, key is test_row_0/B:col10/1732109030073/Put/seqid=0 2024-11-20T13:23:50,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741942_1118 (size=12301) 2024-11-20T13:23:51,004 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=492 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/d66a6a67aefb400687ffa3595c586b42 2024-11-20T13:23:51,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T13:23:51,038 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,040 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-20T13:23:51,040 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:51,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:51,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:51,041 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:51,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
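The FLUSH operations recorded above (procId 32 completed, then a new FlushTableProcedure stored as pid=34 whose FlushRegionProcedure pid=35 keeps failing with "NOT flushing ... as already flushing") are what the master runs when a client asks for a table flush through the Admin API. A minimal sketch of such a request, assuming a standard client Configuration and reusing the table name from this log (the class name and connection setup below are illustrative, not taken from the test code):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; the master turns
            // this into a FlushTableProcedure with per-region subprocedures, as seen
            // in the pid=32/34/35 entries above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```

The "HBaseAdmin$TableFuture ... Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed" entry above is the client-side wait for exactly this kind of master procedure.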
2024-11-20T13:23:51,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:51,078 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/33edafb043d44a0687efc229ea5250e2 is 50, key is test_row_0/C:col10/1732109030073/Put/seqid=0 2024-11-20T13:23:51,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741943_1119 (size=12301) 2024-11-20T13:23:51,132 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=492 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/33edafb043d44a0687efc229ea5250e2 2024-11-20T13:23:51,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/e8bbb2a1c50e49f98bea3b058184ebf6 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/e8bbb2a1c50e49f98bea3b058184ebf6 2024-11-20T13:23:51,149 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/e8bbb2a1c50e49f98bea3b058184ebf6, entries=200, sequenceid=492, filesize=14.4 K 2024-11-20T13:23:51,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/d66a6a67aefb400687ffa3595c586b42 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d66a6a67aefb400687ffa3595c586b42 2024-11-20T13:23:51,162 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d66a6a67aefb400687ffa3595c586b42, entries=150, sequenceid=492, filesize=12.0 K 2024-11-20T13:23:51,163 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/33edafb043d44a0687efc229ea5250e2 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/33edafb043d44a0687efc229ea5250e2 2024-11-20T13:23:51,174 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/33edafb043d44a0687efc229ea5250e2, entries=150, sequenceid=492, filesize=12.0 K 2024-11-20T13:23:51,180 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for cbbdc72320da06253b5398d0c51c77ae in 727ms, sequenceid=492, 
compaction requested=true 2024-11-20T13:23:51,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:51,181 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:51,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:23:51,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:51,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:23:51,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:51,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:23:51,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T13:23:51,183 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40433 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:51,183 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/A is initiating minor compaction (all files) 2024-11-20T13:23:51,183 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/A in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
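The selection entries above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", followed by the exploring compaction policy choosing 3 files) are governed by per-store file-count thresholds. A short sketch of the configuration keys involved, with assumed values that happen to match the common defaults; the test's actual settings are not shown in this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThresholdSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values below are assumptions chosen to line up with the "3 eligible,
        // 16 blocking" figures in the selection log, not read from the test.
        conf.setInt("hbase.hstore.compaction.min", 3);       // minimum store files before a minor compaction is considered
        conf.setInt("hbase.hstore.compaction.max", 10);      // maximum files merged in a single compaction
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);  // past this count, writes to the region can be delayed
        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
    }
}
```

The "16 blocking" figure corresponds to hbase.hstore.blockingStoreFiles: once any store accumulates that many files, further writes to the region may be held back until compaction reduces the count.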
2024-11-20T13:23:51,183 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/89f5798ceea8458fa3ea3952ba99d1e1, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/d2831f81e53b496eb827cd5b4dad7860, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/e8bbb2a1c50e49f98bea3b058184ebf6] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=39.5 K 2024-11-20T13:23:51,184 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:51,184 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 89f5798ceea8458fa3ea3952ba99d1e1, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1732109029607 2024-11-20T13:23:51,186 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting d2831f81e53b496eb827cd5b4dad7860, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=476, earliestPutTs=1732109029695 2024-11-20T13:23:51,190 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting e8bbb2a1c50e49f98bea3b058184ebf6, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=492, earliestPutTs=1732109030042 2024-11-20T13:23:51,194 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,195 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-20T13:23:51,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
2024-11-20T13:23:51,195 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T13:23:51,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:51,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:51,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:51,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:51,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:51,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:51,200 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:51,200 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/B is initiating minor compaction (all files) 2024-11-20T13:23:51,200 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/B in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
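The RegionTooBusyException warnings throughout this section ("Over memstore limit=512.0 K", thrown from HRegion.checkResources) fire because the region's pending memstore data exceeds its blocking limit, which is the memstore flush size multiplied by the block multiplier. A short sketch of that relationship, with assumed values chosen only to reproduce the 512 K figure; the test's real configuration is not visible in this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed, illustrative values: 128 K flush size * multiplier 4 = 512 K,
        // matching the "Over memstore limit=512.0 K" message above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
        System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
    }
}
```

Writes to the region are rejected with RegionTooBusyException while the memstore is above that limit and resume once a flush (such as the 140.89 KB flush started above) brings it back under.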
2024-11-20T13:23:51,200 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/e7970e7c716a46acb38d6f209797a0b8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/73d0e3c5f42f464ca564abef319780a3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d66a6a67aefb400687ffa3595c586b42] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=37.1 K 2024-11-20T13:23:51,212 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting e7970e7c716a46acb38d6f209797a0b8, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1732109029607 2024-11-20T13:23:51,213 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#A#compaction#105 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:51,214 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/0864e06e7be14b42b998de551dacb87d is 50, key is test_row_0/A:col10/1732109030073/Put/seqid=0 2024-11-20T13:23:51,220 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73d0e3c5f42f464ca564abef319780a3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=476, earliestPutTs=1732109029695 2024-11-20T13:23:51,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/e767d4fd35c04923a212f41e2207dcd3 is 50, key is test_row_0/A:col10/1732109030574/Put/seqid=0 2024-11-20T13:23:51,228 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting d66a6a67aefb400687ffa3595c586b42, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=492, earliestPutTs=1732109030044 2024-11-20T13:23:51,250 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#B#compaction#107 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:51,251 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/0ba7a259aa6e4e9ab6388dbbc0ad59f0 is 50, key is test_row_0/B:col10/1732109030073/Put/seqid=0 2024-11-20T13:23:51,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:51,266 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:51,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741945_1121 (size=12301) 2024-11-20T13:23:51,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741944_1120 (size=13493) 2024-11-20T13:23:51,300 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:51,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109091287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,305 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=516 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/e767d4fd35c04923a212f41e2207dcd3 2024-11-20T13:23:51,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:51,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109091289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:51,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109091297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:51,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109091300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:51,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109091311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,323 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/0864e06e7be14b42b998de551dacb87d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/0864e06e7be14b42b998de551dacb87d 2024-11-20T13:23:51,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T13:23:51,334 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/A of cbbdc72320da06253b5398d0c51c77ae into 0864e06e7be14b42b998de551dacb87d(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
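The repeated RegionTooBusyException rejections above are thrown by HRegion.checkResources(), which refuses new mutations once a region's memstore exceeds its blocking limit, i.e. the flush size multiplied by a block multiplier. The unusually small "Over memstore limit=512.0 K" figure in this run comes from test-specific settings; the shipped defaults are 128 MB and 4. A minimal sketch of the two configuration keys involved, shown with those stock defaults rather than the values this test uses:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  // Sketch only: stock defaults, not the values TestAcidGuarantees runs with.
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Memstore size at which a region flush is requested (default 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Mutations are rejected with RegionTooBusyException once the memstore
    // reaches flush.size * block.multiplier (default multiplier 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}

Clients normally retry such rejections, which is consistent with the same connections reappearing above with later callIds and deadlines while the flush catches up.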
2024-11-20T13:23:51,334 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:51,335 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/A, priority=13, startTime=1732109031181; duration=0sec 2024-11-20T13:23:51,335 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:51,335 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:A 2024-11-20T13:23:51,335 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:51,338 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:51,338 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/C is initiating minor compaction (all files) 2024-11-20T13:23:51,338 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/C in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:51,339 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/d2f4e4c2843a44b1b9bca6cd90e58eb7, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/946e46f3e2074e1dac7c362aa931157d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/33edafb043d44a0687efc229ea5250e2] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=37.1 K 2024-11-20T13:23:51,340 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting d2f4e4c2843a44b1b9bca6cd90e58eb7, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1732109029607 2024-11-20T13:23:51,341 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 946e46f3e2074e1dac7c362aa931157d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=476, earliestPutTs=1732109029695 2024-11-20T13:23:51,342 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 33edafb043d44a0687efc229ea5250e2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=492, earliestPutTs=1732109030044 2024-11-20T13:23:51,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 
is added to blk_1073741946_1122 (size=13493) 2024-11-20T13:23:51,368 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/0ba7a259aa6e4e9ab6388dbbc0ad59f0 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/0ba7a259aa6e4e9ab6388dbbc0ad59f0 2024-11-20T13:23:51,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/e7b2eb25fdda47f2bcfe8acd783cadef is 50, key is test_row_0/B:col10/1732109030574/Put/seqid=0 2024-11-20T13:23:51,386 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/B of cbbdc72320da06253b5398d0c51c77ae into 0ba7a259aa6e4e9ab6388dbbc0ad59f0(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:23:51,386 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:51,386 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#C#compaction#109 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:51,386 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/B, priority=13, startTime=1732109031181; duration=0sec 2024-11-20T13:23:51,386 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:51,386 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:B 2024-11-20T13:23:51,387 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/b4fadd121df1494a92f57a22b2b7400b is 50, key is test_row_0/C:col10/1732109030073/Put/seqid=0 2024-11-20T13:23:51,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:51,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109091403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741947_1123 (size=12301) 2024-11-20T13:23:51,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:51,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109091408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,426 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=516 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/e7b2eb25fdda47f2bcfe8acd783cadef 2024-11-20T13:23:51,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:51,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109091412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:51,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109091425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:51,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109091412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741948_1124 (size=13493) 2024-11-20T13:23:51,466 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/b4fadd121df1494a92f57a22b2b7400b as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/b4fadd121df1494a92f57a22b2b7400b 2024-11-20T13:23:51,474 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/C of cbbdc72320da06253b5398d0c51c77ae into b4fadd121df1494a92f57a22b2b7400b(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
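At this point the long-compactions thread has rewritten stores A and C: ExploringCompactionPolicy selected all 3 eligible files (about 37 KB for C) for a minor compaction, and PressureAwareThroughputController reported the write rate against its 50 MB/s limit. The sketch below lists the store-file selection keys usually tuned for this ratio-based policy; the key names and defaults are quoted from memory for current HBase releases and are not taken from this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionConfig {
  // Sketch only: typical defaults for ratio-based minor compaction selection,
  // not the values this test runs with.
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Fewest and most store files a single minor compaction may select.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // A file is considered only if its size is within ratio * (sum of the smaller files).
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    return conf;
  }
}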
2024-11-20T13:23:51,474 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:51,474 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/C, priority=13, startTime=1732109031181; duration=0sec 2024-11-20T13:23:51,474 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:51,474 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:C 2024-11-20T13:23:51,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/365f121bfde548f0a9ed9c3adb82c4e0 is 50, key is test_row_0/C:col10/1732109030574/Put/seqid=0 2024-11-20T13:23:51,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741949_1125 (size=12301) 2024-11-20T13:23:51,564 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=516 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/365f121bfde548f0a9ed9c3adb82c4e0 2024-11-20T13:23:51,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/e767d4fd35c04923a212f41e2207dcd3 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/e767d4fd35c04923a212f41e2207dcd3 2024-11-20T13:23:51,621 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/e767d4fd35c04923a212f41e2207dcd3, entries=150, sequenceid=516, filesize=12.0 K 2024-11-20T13:23:51,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/e7b2eb25fdda47f2bcfe8acd783cadef as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/e7b2eb25fdda47f2bcfe8acd783cadef 2024-11-20T13:23:51,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:51,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109091619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:51,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109091632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:51,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109091635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,648 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/e7b2eb25fdda47f2bcfe8acd783cadef, entries=150, sequenceid=516, filesize=12.0 K 2024-11-20T13:23:51,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:51,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109091637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/365f121bfde548f0a9ed9c3adb82c4e0 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/365f121bfde548f0a9ed9c3adb82c4e0 2024-11-20T13:23:51,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:51,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109091647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,669 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/365f121bfde548f0a9ed9c3adb82c4e0, entries=150, sequenceid=516, filesize=12.0 K 2024-11-20T13:23:51,670 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for cbbdc72320da06253b5398d0c51c77ae in 475ms, sequenceid=516, compaction requested=false 2024-11-20T13:23:51,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:51,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
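While the 140.89 KB flush above completes, the RPC handlers keep rejecting Mutate calls with RegionTooBusyException. If that exception ever surfaces to application code after the HBase client's own retries are exhausted, a caller can apply its own backoff. The sketch below is illustrative only and not part of TestAcidGuarantees: the table, row and column names are copied from the log, while the retry count and sleep times are arbitrary.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  // Sketch only: an application-side retry loop for writes rejected while the
  // region's memstore is over its blocking limit.
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;                    // write accepted
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);  // wait for the memstore flush to free space
          backoffMs *= 2;           // exponential backoff before the next attempt
        }
      }
    }
  }
}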
2024-11-20T13:23:51,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=35 2024-11-20T13:23:51,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=35 2024-11-20T13:23:51,674 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-11-20T13:23:51,674 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 946 msec 2024-11-20T13:23:51,676 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees in 952 msec 2024-11-20T13:23:51,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T13:23:51,834 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-11-20T13:23:51,836 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:23:51,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=36, table=TestAcidGuarantees 2024-11-20T13:23:51,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T13:23:51,845 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=36, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=36, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:23:51,846 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=36, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=36, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:23:51,847 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:23:51,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:51,936 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T13:23:51,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:51,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:51,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:51,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
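The master-side entries above (flush of TestAcidGuarantees requested by the jenkins client, FlushTableProcedure pid=34 completed, pid=36 scheduled) correspond to a synchronous flush request from the test client. The sketch below issues the same request through the public Admin API; apart from the table name, which is taken from the log, it is an ordinary use of that API rather than the test's actual code.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  // Sketch only: request a table flush like the one the master logs as FlushTableProcedure.
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // The synchronous Admin call waits for the master procedure to finish,
      // matching the "Operation: FLUSH ... procId: 34 completed" line above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}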
2024-11-20T13:23:51,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:51,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:51,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T13:23:51,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/a71f54d1f0434c43bfeecdd43152b23e is 50, key is test_row_0/A:col10/1732109031287/Put/seqid=0 2024-11-20T13:23:51,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:51,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109091980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:51,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109091983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:51,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109091990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,999 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:51,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109091992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:51,999 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:52,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109091993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741950_1126 (size=14741) 2024-11-20T13:23:52,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=37 2024-11-20T13:23:52,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:52,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:52,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:52,001 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] handler.RSProcedureHandler(58): pid=37 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:52,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=37 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:52,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=37 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:52,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:52,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109092097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:52,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109092097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:52,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109092101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:52,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109092102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:52,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109092102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T13:23:52,154 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,156 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=37 2024-11-20T13:23:52,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:52,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:52,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:52,156 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] handler.RSProcedureHandler(58): pid=37 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:23:52,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=37 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:52,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=37 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:52,308 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,309 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=37 2024-11-20T13:23:52,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:52,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:52,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:52,312 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] handler.RSProcedureHandler(58): pid=37 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:52,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=37 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:52,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=37 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:52,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:52,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109092313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:52,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109092316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:52,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109092316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:52,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109092320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:52,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109092315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,404 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=533 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/a71f54d1f0434c43bfeecdd43152b23e 2024-11-20T13:23:52,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/3ecee874e572496aa7ddc2d7166d112f is 50, key is test_row_0/B:col10/1732109031287/Put/seqid=0 2024-11-20T13:23:52,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T13:23:52,467 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,468 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=37 2024-11-20T13:23:52,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:52,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
as already flushing 2024-11-20T13:23:52,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:52,469 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] handler.RSProcedureHandler(58): pid=37 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:52,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=37 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:52,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=37 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:52,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741951_1127 (size=12301) 2024-11-20T13:23:52,636 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,637 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=37 2024-11-20T13:23:52,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:52,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:52,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:52,637 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] handler.RSProcedureHandler(58): pid=37 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:52,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=37 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:52,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=37 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:52,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:52,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:52,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40172 deadline: 1732109092627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40204 deadline: 1732109092619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:52,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40252 deadline: 1732109092627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:52,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40188 deadline: 1732109092628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,644 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:23:52,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732109092632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,796 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=37 2024-11-20T13:23:52,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:52,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:52,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:52,797 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] handler.RSProcedureHandler(58): pid=37 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:52,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=37 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:52,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=37 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:52,912 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=533 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/3ecee874e572496aa7ddc2d7166d112f 2024-11-20T13:23:52,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/45973f01fb8f46ea90e8a4d11a9d01f0 is 50, key is test_row_0/C:col10/1732109031287/Put/seqid=0 2024-11-20T13:23:52,952 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:52,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T13:23:52,953 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=37 2024-11-20T13:23:52,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:52,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. as already flushing 2024-11-20T13:23:52,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:52,954 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] handler.RSProcedureHandler(58): pid=37 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:52,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=37 java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:52,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=37 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:23:52,981 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x080f3e14 to 127.0.0.1:53074 2024-11-20T13:23:52,981 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:23:52,982 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2aea0556 to 127.0.0.1:53074 2024-11-20T13:23:52,982 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:23:52,994 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4f59a475 to 127.0.0.1:53074 2024-11-20T13:23:52,994 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:23:53,001 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38296ae7 to 127.0.0.1:53074 2024-11-20T13:23:53,001 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:23:53,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741952_1128 (size=12301) 2024-11-20T13:23:53,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=533 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/45973f01fb8f46ea90e8a4d11a9d01f0 2024-11-20T13:23:53,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/a71f54d1f0434c43bfeecdd43152b23e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/a71f54d1f0434c43bfeecdd43152b23e 2024-11-20T13:23:53,038 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/a71f54d1f0434c43bfeecdd43152b23e, entries=200, sequenceid=533, filesize=14.4 K 2024-11-20T13:23:53,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/3ecee874e572496aa7ddc2d7166d112f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/3ecee874e572496aa7ddc2d7166d112f 2024-11-20T13:23:53,055 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/3ecee874e572496aa7ddc2d7166d112f, entries=150, sequenceid=533, filesize=12.0 K 2024-11-20T13:23:53,056 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/45973f01fb8f46ea90e8a4d11a9d01f0 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/45973f01fb8f46ea90e8a4d11a9d01f0 2024-11-20T13:23:53,064 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/45973f01fb8f46ea90e8a4d11a9d01f0, entries=150, sequenceid=533, filesize=12.0 K 2024-11-20T13:23:53,067 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for cbbdc72320da06253b5398d0c51c77ae in 1131ms, sequenceid=533, compaction requested=true 2024-11-20T13:23:53,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:53,067 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:53,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:23:53,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:53,069 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:53,069 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40535 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:53,069 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/A is initiating minor compaction (all files) 2024-11-20T13:23:53,069 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/A in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
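[Editor's note] The selection message above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") lines up with HBase's stock store-file thresholds: a store becomes eligible for minor compaction at hbase.hstore.compaction.min (default 3) files and blocks writers at hbase.hstore.blockingStoreFiles (default 16), while the earlier "Over memstore limit=512.0 K" rejection comes from the memstore flush size times the block multiplier. The excerpt does not show the test's actual configuration, so the values below are only a hedged illustration of how such limits would be set before opening a connection or mini-cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionFlushTuning {
      public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical 128 KB flush size, chosen only so that the 512 K blocking
        // limit in this log is plausible with the default multiplier of 4.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Writes are rejected with RegionTooBusyException once the memstore
        // reaches flush.size * this multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        // A store becomes eligible for minor compaction at 3 files ("3 eligible").
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Writes to a store are blocked once it holds 16 files ("16 blocking").
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        return conf;
      }
    }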
2024-11-20T13:23:53,070 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/0864e06e7be14b42b998de551dacb87d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/e767d4fd35c04923a212f41e2207dcd3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/a71f54d1f0434c43bfeecdd43152b23e] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=39.6 K 2024-11-20T13:23:53,070 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38095 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:53,071 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/B is initiating minor compaction (all files) 2024-11-20T13:23:53,071 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/B in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:53,071 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/0ba7a259aa6e4e9ab6388dbbc0ad59f0, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/e7b2eb25fdda47f2bcfe8acd783cadef, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/3ecee874e572496aa7ddc2d7166d112f] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=37.2 K 2024-11-20T13:23:53,071 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0864e06e7be14b42b998de551dacb87d, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=492, earliestPutTs=1732109030044 2024-11-20T13:23:53,072 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ba7a259aa6e4e9ab6388dbbc0ad59f0, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=492, earliestPutTs=1732109030044 2024-11-20T13:23:53,072 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting e767d4fd35c04923a212f41e2207dcd3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=516, earliestPutTs=1732109030556 2024-11-20T13:23:53,073 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting e7b2eb25fdda47f2bcfe8acd783cadef, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=516, earliestPutTs=1732109030556 2024-11-20T13:23:53,073 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting a71f54d1f0434c43bfeecdd43152b23e, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=533, earliestPutTs=1732109031286 2024-11-20T13:23:53,074 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ecee874e572496aa7ddc2d7166d112f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=533, earliestPutTs=1732109031286 2024-11-20T13:23:53,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:23:53,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:53,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cbbdc72320da06253b5398d0c51c77ae:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:23:53,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:53,100 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#A#compaction#114 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:53,101 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/8679e59a7c9b4f21815a3a280b75e4f2 is 50, key is test_row_0/A:col10/1732109031287/Put/seqid=0 2024-11-20T13:23:53,107 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:53,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=37 2024-11-20T13:23:53,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
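[Editor's note] pid=37 here is the same FlushRegionCallable the master keeps re-dispatching until the region stops answering "already flushing"; further down it completes as FlushRegionProcedure (ppid=36), and the client sees "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 36 completed". In this build that whole procedure chain is started by a single Admin flush call. A minimal client-side sketch, with connection details assumed rather than taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Table-level flush: the master runs a FlushTableProcedure plus one
          // FlushRegionProcedure per region, retrying regions that are mid-flush
          // (exactly the pid=36 / pid=37 retries visible in this log).
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }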
2024-11-20T13:23:53,108 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T13:23:53,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:53,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:53,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:53,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:53,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:53,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:53,110 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#B#compaction#115 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:53,111 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/ac1d6c1743a64431ade925d128ecd3a4 is 50, key is test_row_0/B:col10/1732109031287/Put/seqid=0 2024-11-20T13:23:53,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/a1c63bec1aa54c41b81b4288b010c1d2 is 50, key is test_row_0/A:col10/1732109031977/Put/seqid=0 2024-11-20T13:23:53,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:53,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
as already flushing 2024-11-20T13:23:53,160 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3e527f0a to 127.0.0.1:53074 2024-11-20T13:23:53,160 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:23:53,162 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c8859ce to 127.0.0.1:53074 2024-11-20T13:23:53,162 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:23:53,164 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x71111e88 to 127.0.0.1:53074 2024-11-20T13:23:53,164 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:23:53,164 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x70f8b481 to 127.0.0.1:53074 2024-11-20T13:23:53,164 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:23:53,168 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75ceb322 to 127.0.0.1:53074 2024-11-20T13:23:53,168 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:23:53,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741953_1129 (size=13595) 2024-11-20T13:23:53,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741955_1131 (size=12301) 2024-11-20T13:23:53,207 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=555 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/a1c63bec1aa54c41b81b4288b010c1d2 2024-11-20T13:23:53,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741954_1130 (size=13595) 2024-11-20T13:23:53,219 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/8679e59a7c9b4f21815a3a280b75e4f2 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/8679e59a7c9b4f21815a3a280b75e4f2 2024-11-20T13:23:53,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/9430d8f5550c4ac18cc52660ecbefc26 is 50, key is test_row_0/B:col10/1732109031977/Put/seqid=0 2024-11-20T13:23:53,227 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/ac1d6c1743a64431ade925d128ecd3a4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/ac1d6c1743a64431ade925d128ecd3a4 
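[Editor's note] The Thread-149 through Thread-157 shutdowns above are the test's client threads releasing their connections; each "Close zookeeper connection" / "Stopping rpc client" pair corresponds to one Connection being closed. The writes those threads issued are ordinary Puts against families A, B and C of row keys like test_row_0 (visible in the "key is test_row_0/A:col10/…" messages). A simplified, hypothetical writer in the same shape (class and value choices are mine, not the tool's):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SampleWriter {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        byte[] value = Bytes.toBytes(System.currentTimeMillis()); // same value for every column
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          // One row mutation touching all three families keeps the update atomic,
          // which is what the acid-guarantees readers later verify.
          for (String family : new String[] {"A", "B", "C"}) {
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
          }
          // RegionTooBusyException ("Over memstore limit", seen earlier in this log)
          // is retried by the client internally, up to hbase.client.retries.number.
          table.put(put);
        } // closing the Connection produces "Close zookeeper connection" lines like the ones above
      }
    }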
2024-11-20T13:23:53,234 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/A of cbbdc72320da06253b5398d0c51c77ae into 8679e59a7c9b4f21815a3a280b75e4f2(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:23:53,234 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:53,234 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/A, priority=13, startTime=1732109033067; duration=0sec 2024-11-20T13:23:53,234 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:23:53,234 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:A 2024-11-20T13:23:53,234 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:23:53,238 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/B of cbbdc72320da06253b5398d0c51c77ae into ac1d6c1743a64431ade925d128ecd3a4(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:23:53,238 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:53,238 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/B, priority=13, startTime=1732109033069; duration=0sec 2024-11-20T13:23:53,238 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38095 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:23:53,238 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:53,238 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:B 2024-11-20T13:23:53,238 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): cbbdc72320da06253b5398d0c51c77ae/C is initiating minor compaction (all files) 2024-11-20T13:23:53,238 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cbbdc72320da06253b5398d0c51c77ae/C in TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
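[Editor's note] The compactions above were queued by the region server itself ("Small Compaction requested: system; Because: MemStoreFlusher.0"), not by any client. For completeness, the same work can also be requested and observed through the Admin API; this is a hedged sketch of that alternative path (class name and polling interval are assumptions, not taken from the test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactAndWait {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.majorCompact(table);                          // asynchronous request
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(1000);                               // poll until compaction work drains
          }
        }
      }
    }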
2024-11-20T13:23:53,239 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/b4fadd121df1494a92f57a22b2b7400b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/365f121bfde548f0a9ed9c3adb82c4e0, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/45973f01fb8f46ea90e8a4d11a9d01f0] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp, totalSize=37.2 K 2024-11-20T13:23:53,239 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4fadd121df1494a92f57a22b2b7400b, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=492, earliestPutTs=1732109030044 2024-11-20T13:23:53,240 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 365f121bfde548f0a9ed9c3adb82c4e0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=516, earliestPutTs=1732109030556 2024-11-20T13:23:53,240 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45973f01fb8f46ea90e8a4d11a9d01f0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=533, earliestPutTs=1732109031286 2024-11-20T13:23:53,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741956_1132 (size=12301) 2024-11-20T13:23:53,257 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cbbdc72320da06253b5398d0c51c77ae#C#compaction#118 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:23:53,258 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=555 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/9430d8f5550c4ac18cc52660ecbefc26 2024-11-20T13:23:53,264 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/a2b286258ea1483894f8d8d21150512c is 50, key is test_row_0/C:col10/1732109031287/Put/seqid=0 2024-11-20T13:23:53,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741957_1133 (size=13595) 2024-11-20T13:23:53,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/f9f5826e13fc4c1886d9d50578b076d5 is 50, key is test_row_0/C:col10/1732109031977/Put/seqid=0 2024-11-20T13:23:53,307 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/a2b286258ea1483894f8d8d21150512c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/a2b286258ea1483894f8d8d21150512c 2024-11-20T13:23:53,323 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cbbdc72320da06253b5398d0c51c77ae/C of cbbdc72320da06253b5398d0c51c77ae into a2b286258ea1483894f8d8d21150512c(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
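[Editor's note] The flushing and compacting above is what the reader and scanner totals a little further down ("read 1549", "verified 2313 rows") are exercised against: the tool's readers fetch whole rows and check, roughly, that every returned cell carries the same value, since each writer updates all columns of a row in a single atomic Put. A simplified, hypothetical version of that per-row check (not the tool's actual code):

    import java.util.Arrays;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowConsistencyCheck {
      /** Returns true if every cell in the row holds the same value (the ACID property under test). */
      static boolean rowIsConsistent(Table table, String row) throws Exception {
        Result result = table.get(new Get(Bytes.toBytes(row)));
        byte[] expected = null;
        for (Cell cell : result.rawCells()) {
          byte[] value = CellUtil.cloneValue(cell);
          if (expected == null) {
            expected = value;              // the first cell fixes the expected value
          } else if (!Arrays.equals(expected, value)) {
            return false;                  // a torn row: columns from different Puts
          }
        }
        return true;
      }
    }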
2024-11-20T13:23:53,323 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:53,323 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae., storeName=cbbdc72320da06253b5398d0c51c77ae/C, priority=13, startTime=1732109033076; duration=0sec 2024-11-20T13:23:53,323 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:23:53,323 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cbbdc72320da06253b5398d0c51c77ae:C 2024-11-20T13:23:53,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741958_1134 (size=12301) 2024-11-20T13:23:53,330 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=555 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/f9f5826e13fc4c1886d9d50578b076d5 2024-11-20T13:23:53,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/a1c63bec1aa54c41b81b4288b010c1d2 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/a1c63bec1aa54c41b81b4288b010c1d2 2024-11-20T13:23:53,346 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/a1c63bec1aa54c41b81b4288b010c1d2, entries=150, sequenceid=555, filesize=12.0 K 2024-11-20T13:23:53,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/9430d8f5550c4ac18cc52660ecbefc26 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/9430d8f5550c4ac18cc52660ecbefc26 2024-11-20T13:23:53,357 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/9430d8f5550c4ac18cc52660ecbefc26, entries=150, sequenceid=555, filesize=12.0 K 2024-11-20T13:23:53,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/f9f5826e13fc4c1886d9d50578b076d5 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/f9f5826e13fc4c1886d9d50578b076d5 2024-11-20T13:23:53,365 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/f9f5826e13fc4c1886d9d50578b076d5, entries=150, sequenceid=555, filesize=12.0 K 2024-11-20T13:23:53,366 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=33.54 KB/34350 for cbbdc72320da06253b5398d0c51c77ae in 258ms, sequenceid=555, compaction requested=false 2024-11-20T13:23:53,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2538): Flush status journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:53,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:53,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=37 2024-11-20T13:23:53,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=37 2024-11-20T13:23:53,370 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-11-20T13:23:53,370 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5220 sec 2024-11-20T13:23:53,373 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=36, table=TestAcidGuarantees in 1.5350 sec 2024-11-20T13:23:53,669 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T13:23:53,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T13:23:53,955 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 36 completed 2024-11-20T13:23:53,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-11-20T13:23:53,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 78
2024-11-20T13:23:53,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 96
2024-11-20T13:23:53,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 86
2024-11-20T13:23:53,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 88
2024-11-20T13:23:53,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 93
2024-11-20T13:23:53,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-20T13:23:53,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 1549
2024-11-20T13:23:53,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 1546
2024-11-20T13:23:53,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-20T13:23:53,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 771
2024-11-20T13:23:53,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 2313 rows
2024-11-20T13:23:53,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 765
2024-11-20T13:23:53,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 2295 rows
2024-11-20T13:23:53,955 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-20T13:23:53,957 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6869c97c to 127.0.0.1:53074
2024-11-20T13:23:53,957 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T13:23:53,970 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-20T13:23:53,976 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-20T13:23:53,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-20T13:23:53,990 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109033989"}]},"ts":"1732109033989"}
2024-11-20T13:23:54,006 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-20T13:23:54,014 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-20T13:23:54,016 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-20T13:23:54,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38
2024-11-20T13:23:54,025 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cbbdc72320da06253b5398d0c51c77ae, UNASSIGN}]
2024-11-20T13:23:54,026 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure
table=TestAcidGuarantees, region=cbbdc72320da06253b5398d0c51c77ae, UNASSIGN 2024-11-20T13:23:54,037 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=cbbdc72320da06253b5398d0c51c77ae, regionState=CLOSING, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:54,052 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T13:23:54,052 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; CloseRegionProcedure cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137}] 2024-11-20T13:23:54,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-20T13:23:54,209 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:54,227 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(124): Close cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:54,227 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T13:23:54,228 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1681): Closing cbbdc72320da06253b5398d0c51c77ae, disabling compactions & flushes 2024-11-20T13:23:54,228 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:54,228 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:54,228 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. after waiting 0 ms 2024-11-20T13:23:54,228 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 
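[Editor's note] The pid=38 → 39 → 40 → 41 chain above (DisableTableProcedure → CloseTableRegionsProcedure → TransitRegionStateProcedure UNASSIGN → CloseRegionProcedure) is the server-side fan-out of a single client call, the test's teardown disabling TestAcidGuarantees ("Client=jenkins//172.17.0.2 disable TestAcidGuarantees"). The usual follow-up is a delete, which is assumed here rather than shown in this excerpt; a minimal sketch of that teardown:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          if (admin.isTableEnabled(table)) {
            admin.disableTable(table);   // drives the DisableTableProcedure chain seen above
          }
          admin.deleteTable(table);      // assumed cleanup step; not visible in this excerpt
        }
      }
    }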
2024-11-20T13:23:54,229 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(2837): Flushing cbbdc72320da06253b5398d0c51c77ae 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T13:23:54,229 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=A 2024-11-20T13:23:54,229 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:54,229 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=B 2024-11-20T13:23:54,229 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:54,230 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cbbdc72320da06253b5398d0c51c77ae, store=C 2024-11-20T13:23:54,230 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:23:54,242 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/0b4e39e715ce437693ff484d706ab29a is 50, key is test_row_0/A:col10/1732109033158/Put/seqid=0 2024-11-20T13:23:54,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741959_1135 (size=12301) 2024-11-20T13:23:54,275 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=566 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/0b4e39e715ce437693ff484d706ab29a 2024-11-20T13:23:54,308 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/74b8333869b74a4eb9ab13748dd00128 is 50, key is test_row_0/B:col10/1732109033158/Put/seqid=0 2024-11-20T13:23:54,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-20T13:23:54,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741960_1136 (size=12301) 2024-11-20T13:23:54,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-20T13:23:54,731 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 
{event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=566 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/74b8333869b74a4eb9ab13748dd00128 2024-11-20T13:23:54,753 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/9ef0d3a7da314ba6938c62449f1de73f is 50, key is test_row_0/C:col10/1732109033158/Put/seqid=0 2024-11-20T13:23:54,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741961_1137 (size=12301) 2024-11-20T13:23:55,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-20T13:23:55,201 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=566 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/9ef0d3a7da314ba6938c62449f1de73f 2024-11-20T13:23:55,215 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/A/0b4e39e715ce437693ff484d706ab29a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/0b4e39e715ce437693ff484d706ab29a 2024-11-20T13:23:55,229 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/0b4e39e715ce437693ff484d706ab29a, entries=150, sequenceid=566, filesize=12.0 K 2024-11-20T13:23:55,236 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/B/74b8333869b74a4eb9ab13748dd00128 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/74b8333869b74a4eb9ab13748dd00128 2024-11-20T13:23:55,243 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/74b8333869b74a4eb9ab13748dd00128, entries=150, sequenceid=566, filesize=12.0 K 2024-11-20T13:23:55,246 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/.tmp/C/9ef0d3a7da314ba6938c62449f1de73f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/9ef0d3a7da314ba6938c62449f1de73f 2024-11-20T13:23:55,253 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/9ef0d3a7da314ba6938c62449f1de73f, entries=150, sequenceid=566, filesize=12.0 K 2024-11-20T13:23:55,254 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for cbbdc72320da06253b5398d0c51c77ae in 1026ms, sequenceid=566, compaction requested=true 2024-11-20T13:23:55,269 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/be746f7c9fdb466d84fb2592a151cbf5, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/3fcd3ff9ac0a4231a18d02c8eb8dd2b3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9810d3db726c417eb2b4e19df61ee600, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9bde4a4abccb4202962fe8af8c8c9918, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/ccb4bd314ade43249daaadd5407c7537, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/63c2326d023d463cb2ed28e6794f24ca, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/174c1e5841d8460f955af7ced5954923, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/b688ab5b865745da97bf42a25fa6adef, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/4a5282a3b95d43e0a2becb39e281ad9b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9ca469f82d0a4db3b92d50c4ffd99449, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9b933139d12b48988edbd809f1cbe969, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/6db885bb1e964275b2fbb4bc8366ce95, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/fd56f71b82474865b85708891bdd6326, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/53ea0b65940b4060abd2a8382b24f416, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/b767cef5c7bf467f80c75776097c7abd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/b5a950a42fed4530a3325ae2ca77885d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/347b899aa9a2483487a0887f32e82dbe, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/5afb8958c9254586b305f32b388136d6, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/2d3fe302fa4b475797cd4cb196e18a3c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/bf999d7263b84a7bbf8d8ac33872bd9a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/99baa019e97f493a95526566764de8d1, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/8c418cf97eaf4d3eab3c2e519c5744d7, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/df0f063fe9c4441ca62024472c644ae9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/1a1b12b09d814636bae260e1f0110f26, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/42add19b932549fd9f769eb1cb3ed754, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/5a77b888c0cc427ead866490a11d098f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/033e3050b01b48d197c0ca3a9d65d462, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/2ba0bd4f0ec2496eba499e51e08a3310, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/5c8d5c4ecabd4d6790cb6ead46438fef, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/196be426d6cd48f7a3052408c6f86bc4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/c0d73c25b43d4fcd98bb87f0e6d7e463, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/237745a12c57462993ed7c70a9169e94, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/89f5798ceea8458fa3ea3952ba99d1e1, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/d2831f81e53b496eb827cd5b4dad7860, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/e8bbb2a1c50e49f98bea3b058184ebf6, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/0864e06e7be14b42b998de551dacb87d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/e767d4fd35c04923a212f41e2207dcd3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/a71f54d1f0434c43bfeecdd43152b23e] to archive 2024-11-20T13:23:55,283 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T13:23:55,293 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/be746f7c9fdb466d84fb2592a151cbf5 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/be746f7c9fdb466d84fb2592a151cbf5 2024-11-20T13:23:55,299 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/3fcd3ff9ac0a4231a18d02c8eb8dd2b3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/3fcd3ff9ac0a4231a18d02c8eb8dd2b3 2024-11-20T13:23:55,302 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9810d3db726c417eb2b4e19df61ee600 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9810d3db726c417eb2b4e19df61ee600 2024-11-20T13:23:55,305 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9bde4a4abccb4202962fe8af8c8c9918 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9bde4a4abccb4202962fe8af8c8c9918 2024-11-20T13:23:55,307 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/ccb4bd314ade43249daaadd5407c7537 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/ccb4bd314ade43249daaadd5407c7537 2024-11-20T13:23:55,310 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/63c2326d023d463cb2ed28e6794f24ca to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/63c2326d023d463cb2ed28e6794f24ca 2024-11-20T13:23:55,313 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/174c1e5841d8460f955af7ced5954923 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/174c1e5841d8460f955af7ced5954923 2024-11-20T13:23:55,316 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/b688ab5b865745da97bf42a25fa6adef to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/b688ab5b865745da97bf42a25fa6adef 2024-11-20T13:23:55,319 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/4a5282a3b95d43e0a2becb39e281ad9b to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/4a5282a3b95d43e0a2becb39e281ad9b 2024-11-20T13:23:55,321 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9ca469f82d0a4db3b92d50c4ffd99449 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9ca469f82d0a4db3b92d50c4ffd99449 2024-11-20T13:23:55,323 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9b933139d12b48988edbd809f1cbe969 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/9b933139d12b48988edbd809f1cbe969 2024-11-20T13:23:55,328 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/6db885bb1e964275b2fbb4bc8366ce95 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/6db885bb1e964275b2fbb4bc8366ce95 2024-11-20T13:23:55,333 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/fd56f71b82474865b85708891bdd6326 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/fd56f71b82474865b85708891bdd6326 2024-11-20T13:23:55,342 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/53ea0b65940b4060abd2a8382b24f416 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/53ea0b65940b4060abd2a8382b24f416 2024-11-20T13:23:55,344 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/b767cef5c7bf467f80c75776097c7abd to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/b767cef5c7bf467f80c75776097c7abd 2024-11-20T13:23:55,350 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/b5a950a42fed4530a3325ae2ca77885d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/b5a950a42fed4530a3325ae2ca77885d 2024-11-20T13:23:55,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/347b899aa9a2483487a0887f32e82dbe to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/347b899aa9a2483487a0887f32e82dbe 2024-11-20T13:23:55,355 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/5afb8958c9254586b305f32b388136d6 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/5afb8958c9254586b305f32b388136d6 2024-11-20T13:23:55,358 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/2d3fe302fa4b475797cd4cb196e18a3c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/2d3fe302fa4b475797cd4cb196e18a3c 2024-11-20T13:23:55,361 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/bf999d7263b84a7bbf8d8ac33872bd9a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/bf999d7263b84a7bbf8d8ac33872bd9a 2024-11-20T13:23:55,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/99baa019e97f493a95526566764de8d1 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/99baa019e97f493a95526566764de8d1 2024-11-20T13:23:55,369 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/8c418cf97eaf4d3eab3c2e519c5744d7 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/8c418cf97eaf4d3eab3c2e519c5744d7 2024-11-20T13:23:55,371 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/df0f063fe9c4441ca62024472c644ae9 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/df0f063fe9c4441ca62024472c644ae9 2024-11-20T13:23:55,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/1a1b12b09d814636bae260e1f0110f26 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/1a1b12b09d814636bae260e1f0110f26 2024-11-20T13:23:55,377 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/42add19b932549fd9f769eb1cb3ed754 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/42add19b932549fd9f769eb1cb3ed754 2024-11-20T13:23:55,381 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/5a77b888c0cc427ead866490a11d098f to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/5a77b888c0cc427ead866490a11d098f 2024-11-20T13:23:55,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/033e3050b01b48d197c0ca3a9d65d462 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/033e3050b01b48d197c0ca3a9d65d462 2024-11-20T13:23:55,386 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/2ba0bd4f0ec2496eba499e51e08a3310 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/2ba0bd4f0ec2496eba499e51e08a3310 2024-11-20T13:23:55,390 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/5c8d5c4ecabd4d6790cb6ead46438fef to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/5c8d5c4ecabd4d6790cb6ead46438fef 2024-11-20T13:23:55,391 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/196be426d6cd48f7a3052408c6f86bc4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/196be426d6cd48f7a3052408c6f86bc4 2024-11-20T13:23:55,393 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/c0d73c25b43d4fcd98bb87f0e6d7e463 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/c0d73c25b43d4fcd98bb87f0e6d7e463 2024-11-20T13:23:55,395 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/237745a12c57462993ed7c70a9169e94 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/237745a12c57462993ed7c70a9169e94 2024-11-20T13:23:55,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/89f5798ceea8458fa3ea3952ba99d1e1 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/89f5798ceea8458fa3ea3952ba99d1e1 2024-11-20T13:23:55,400 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/d2831f81e53b496eb827cd5b4dad7860 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/d2831f81e53b496eb827cd5b4dad7860 2024-11-20T13:23:55,420 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/e8bbb2a1c50e49f98bea3b058184ebf6 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/e8bbb2a1c50e49f98bea3b058184ebf6 2024-11-20T13:23:55,437 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/0864e06e7be14b42b998de551dacb87d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/0864e06e7be14b42b998de551dacb87d 2024-11-20T13:23:55,443 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/e767d4fd35c04923a212f41e2207dcd3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/e767d4fd35c04923a212f41e2207dcd3 2024-11-20T13:23:55,453 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/a71f54d1f0434c43bfeecdd43152b23e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/a71f54d1f0434c43bfeecdd43152b23e 2024-11-20T13:23:55,498 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/b96e0ff69c974e578f863b0fa6ca68be, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/4154fc76c16c4943a8f7cfd980028568, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c93696e6180941aab637c9310fe6d843, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/7278a8b83b8345a58f5a1bb942ca510e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/8b91a90232d342d5a8d457e2e1fe9e4e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d0d00a13478049ff8f131182c009b2f9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/7c0973046e9d4ed19ecc2b74446a3bcd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/83609fe96ac840f1973522b60bb15316, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/edaf0aa1aa8945aa916a5a9ac405195e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/aa299044f09043a49dc1f69910fc2f1e, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/2c84ef22b73049cea22fd707ad3d567e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/52a6a661208c45de81e7f18bb8fb647a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c7c67122e4524ca9baa9d2cb63e720f1, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/9772a4a7ad434beba149fe945da7eb5d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d1b1bb577af64b46bcf444c319165722, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/65549fbb975449e799cc9e1b49af9967, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d0cd3ed54f144c3ea0d32591400648eb, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/49876362d5a443059d015428fb5962ce, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c061b124668e4ded90dfbddfa796071e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/6562944edfea41c6905b85be6f10f762, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/aa6c19eb1797426aa118131df837fdc4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d5bffbe6eac14c20a7a552c1d0ad9532, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c30a8b2034ee4439bbb8eafb9ba58b5f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/52da2a13cff54cd8a085469772e10d0d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/debb4e25e0d44b2a89377bb7862024a2, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/81fd7e099ad6418a82122739924dcc7f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/2e6f817a7be2465293e34cd1216a6949, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/391f23a9c8d345d89b70028938741350, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/11daa8fe0e604702b843c09499cbccbc, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/b03e0df17ace4e559b1bd823b2912e9b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/f4e5d492f7114e4fa0d3ef5d6481e124, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/e7970e7c716a46acb38d6f209797a0b8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/260f53bb14d74ef3a3ec7acca0de1088, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/73d0e3c5f42f464ca564abef319780a3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/0ba7a259aa6e4e9ab6388dbbc0ad59f0, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d66a6a67aefb400687ffa3595c586b42, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/e7b2eb25fdda47f2bcfe8acd783cadef, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/3ecee874e572496aa7ddc2d7166d112f] to archive 2024-11-20T13:23:55,505 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T13:23:55,521 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/b96e0ff69c974e578f863b0fa6ca68be to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/b96e0ff69c974e578f863b0fa6ca68be 2024-11-20T13:23:55,540 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/4154fc76c16c4943a8f7cfd980028568 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/4154fc76c16c4943a8f7cfd980028568 2024-11-20T13:23:55,551 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c93696e6180941aab637c9310fe6d843 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c93696e6180941aab637c9310fe6d843 2024-11-20T13:23:55,568 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/7278a8b83b8345a58f5a1bb942ca510e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/7278a8b83b8345a58f5a1bb942ca510e 2024-11-20T13:23:55,577 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/8b91a90232d342d5a8d457e2e1fe9e4e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/8b91a90232d342d5a8d457e2e1fe9e4e 2024-11-20T13:23:55,582 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d0d00a13478049ff8f131182c009b2f9 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d0d00a13478049ff8f131182c009b2f9 2024-11-20T13:23:55,593 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/7c0973046e9d4ed19ecc2b74446a3bcd to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/7c0973046e9d4ed19ecc2b74446a3bcd 2024-11-20T13:23:55,616 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/83609fe96ac840f1973522b60bb15316 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/83609fe96ac840f1973522b60bb15316 2024-11-20T13:23:55,623 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/edaf0aa1aa8945aa916a5a9ac405195e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/edaf0aa1aa8945aa916a5a9ac405195e 2024-11-20T13:23:55,633 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/aa299044f09043a49dc1f69910fc2f1e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/aa299044f09043a49dc1f69910fc2f1e 2024-11-20T13:23:55,640 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/2c84ef22b73049cea22fd707ad3d567e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/2c84ef22b73049cea22fd707ad3d567e 2024-11-20T13:23:55,671 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/52a6a661208c45de81e7f18bb8fb647a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/52a6a661208c45de81e7f18bb8fb647a 2024-11-20T13:23:55,697 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c7c67122e4524ca9baa9d2cb63e720f1 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c7c67122e4524ca9baa9d2cb63e720f1 2024-11-20T13:23:55,724 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/9772a4a7ad434beba149fe945da7eb5d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/9772a4a7ad434beba149fe945da7eb5d 2024-11-20T13:23:55,740 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d1b1bb577af64b46bcf444c319165722 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d1b1bb577af64b46bcf444c319165722 2024-11-20T13:23:55,751 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/65549fbb975449e799cc9e1b49af9967 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/65549fbb975449e799cc9e1b49af9967 2024-11-20T13:23:55,774 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d0cd3ed54f144c3ea0d32591400648eb to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d0cd3ed54f144c3ea0d32591400648eb 2024-11-20T13:23:55,796 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/49876362d5a443059d015428fb5962ce to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/49876362d5a443059d015428fb5962ce 2024-11-20T13:23:55,816 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c061b124668e4ded90dfbddfa796071e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c061b124668e4ded90dfbddfa796071e 2024-11-20T13:23:55,840 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/6562944edfea41c6905b85be6f10f762 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/6562944edfea41c6905b85be6f10f762 2024-11-20T13:23:55,868 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/aa6c19eb1797426aa118131df837fdc4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/aa6c19eb1797426aa118131df837fdc4 2024-11-20T13:23:55,888 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d5bffbe6eac14c20a7a552c1d0ad9532 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d5bffbe6eac14c20a7a552c1d0ad9532 2024-11-20T13:23:55,904 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c30a8b2034ee4439bbb8eafb9ba58b5f to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/c30a8b2034ee4439bbb8eafb9ba58b5f 2024-11-20T13:23:55,922 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/52da2a13cff54cd8a085469772e10d0d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/52da2a13cff54cd8a085469772e10d0d 2024-11-20T13:23:55,935 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/debb4e25e0d44b2a89377bb7862024a2 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/debb4e25e0d44b2a89377bb7862024a2 2024-11-20T13:23:55,964 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/81fd7e099ad6418a82122739924dcc7f to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/81fd7e099ad6418a82122739924dcc7f 2024-11-20T13:23:55,988 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/2e6f817a7be2465293e34cd1216a6949 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/2e6f817a7be2465293e34cd1216a6949 2024-11-20T13:23:56,007 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/391f23a9c8d345d89b70028938741350 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/391f23a9c8d345d89b70028938741350 2024-11-20T13:23:56,028 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/11daa8fe0e604702b843c09499cbccbc to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/11daa8fe0e604702b843c09499cbccbc 2024-11-20T13:23:56,048 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/b03e0df17ace4e559b1bd823b2912e9b to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/b03e0df17ace4e559b1bd823b2912e9b 2024-11-20T13:23:56,076 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/f4e5d492f7114e4fa0d3ef5d6481e124 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/f4e5d492f7114e4fa0d3ef5d6481e124 2024-11-20T13:23:56,092 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/e7970e7c716a46acb38d6f209797a0b8 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/e7970e7c716a46acb38d6f209797a0b8 2024-11-20T13:23:56,107 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/260f53bb14d74ef3a3ec7acca0de1088 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/260f53bb14d74ef3a3ec7acca0de1088 2024-11-20T13:23:56,124 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/73d0e3c5f42f464ca564abef319780a3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/73d0e3c5f42f464ca564abef319780a3 2024-11-20T13:23:56,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-20T13:23:56,144 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/0ba7a259aa6e4e9ab6388dbbc0ad59f0 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/0ba7a259aa6e4e9ab6388dbbc0ad59f0 2024-11-20T13:23:56,160 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d66a6a67aefb400687ffa3595c586b42 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/d66a6a67aefb400687ffa3595c586b42 2024-11-20T13:23:56,175 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/e7b2eb25fdda47f2bcfe8acd783cadef to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/e7b2eb25fdda47f2bcfe8acd783cadef 2024-11-20T13:23:56,178 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/3ecee874e572496aa7ddc2d7166d112f to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/3ecee874e572496aa7ddc2d7166d112f 2024-11-20T13:23:56,196 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/db39da6ddaf34b9392e58b8c1dbe9d3a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/922e9546d60d479b84139d8b5136e065, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/dc63bf2ba86b492c87110e141ee6ceb9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/b091cc6dbf3f4945b353c2a11dcd16f5, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/bbeb7d315d5949578f3fca61367fde4c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/0fd5ed5661a140799dbae356d3e575eb, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/604e2c6ac417422bb304623116e36588, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/cd51b962ac76454ab5594384998e0db8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/43f33a4976344584ba46152c5be414c5, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/cfe492969720440ebc79fbeae68fe128, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/c3d121ac0ed34e3893a46a168eaf7115, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/f7f52ba60a7a4f249c494f3f59d31646, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/83ebb54179c041ceb396c075c5bef4d6, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/4cdc6b38f8b340e09a2389de2bb1481b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/aaf8890ee98f4c1b9dcce2468c5efaa4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/d793372f8d2f44d8b85fe0c3a0332ab8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/e626963d4cef4441a0f51a14dc207f22, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/e51cbb68c27c4c56b0873925bf4026f8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/25bdf48243044d658edbb40871d54325, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/8c3832b7ea2f4f9d8031174e55c48c99, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/3d9e3b74336c4e2b80e9958d8df7e585, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/5d8e40db79cb42f3a0ec905e41461032, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/8639171a3c44490a85446780405190df, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/34dbae8a39e24b5e9c11d48af22897ee, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/7dea75d1ab0e4728958680521c2c609e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/21fd71dc20ee4861a6135acfdaae15d3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/d95fd5cfb6534ec5b25346b703232d42, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/2508f000121a4a80ab2ecb7f0d137a9c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/2d794ae4ba67451a815e931f31ef6dbe, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/36b9efd59b654e72bde0c383b2c4ffc4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/1da3caca4c8a419da2308d485e505cb0, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/d2f4e4c2843a44b1b9bca6cd90e58eb7, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/4ae8a61abc574f56bb0a20d1d2d2d982, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/946e46f3e2074e1dac7c362aa931157d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/b4fadd121df1494a92f57a22b2b7400b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/33edafb043d44a0687efc229ea5250e2, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/365f121bfde548f0a9ed9c3adb82c4e0, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/45973f01fb8f46ea90e8a4d11a9d01f0] to archive 2024-11-20T13:23:56,198 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T13:23:56,202 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/db39da6ddaf34b9392e58b8c1dbe9d3a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/db39da6ddaf34b9392e58b8c1dbe9d3a 2024-11-20T13:23:56,204 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/922e9546d60d479b84139d8b5136e065 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/922e9546d60d479b84139d8b5136e065 2024-11-20T13:23:56,206 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/dc63bf2ba86b492c87110e141ee6ceb9 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/dc63bf2ba86b492c87110e141ee6ceb9 2024-11-20T13:23:56,208 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/b091cc6dbf3f4945b353c2a11dcd16f5 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/b091cc6dbf3f4945b353c2a11dcd16f5 2024-11-20T13:23:56,210 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/bbeb7d315d5949578f3fca61367fde4c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/bbeb7d315d5949578f3fca61367fde4c 2024-11-20T13:23:56,212 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/0fd5ed5661a140799dbae356d3e575eb to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/0fd5ed5661a140799dbae356d3e575eb 2024-11-20T13:23:56,214 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/604e2c6ac417422bb304623116e36588 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/604e2c6ac417422bb304623116e36588 2024-11-20T13:23:56,215 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/cd51b962ac76454ab5594384998e0db8 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/cd51b962ac76454ab5594384998e0db8 2024-11-20T13:23:56,217 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/43f33a4976344584ba46152c5be414c5 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/43f33a4976344584ba46152c5be414c5 2024-11-20T13:23:56,220 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/cfe492969720440ebc79fbeae68fe128 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/cfe492969720440ebc79fbeae68fe128 2024-11-20T13:23:56,223 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/c3d121ac0ed34e3893a46a168eaf7115 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/c3d121ac0ed34e3893a46a168eaf7115 2024-11-20T13:23:56,225 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/f7f52ba60a7a4f249c494f3f59d31646 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/f7f52ba60a7a4f249c494f3f59d31646 2024-11-20T13:23:56,229 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/83ebb54179c041ceb396c075c5bef4d6 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/83ebb54179c041ceb396c075c5bef4d6 2024-11-20T13:23:56,234 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/4cdc6b38f8b340e09a2389de2bb1481b to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/4cdc6b38f8b340e09a2389de2bb1481b 2024-11-20T13:23:56,238 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/aaf8890ee98f4c1b9dcce2468c5efaa4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/aaf8890ee98f4c1b9dcce2468c5efaa4 2024-11-20T13:23:56,240 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/d793372f8d2f44d8b85fe0c3a0332ab8 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/d793372f8d2f44d8b85fe0c3a0332ab8 2024-11-20T13:23:56,243 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/e626963d4cef4441a0f51a14dc207f22 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/e626963d4cef4441a0f51a14dc207f22 2024-11-20T13:23:56,246 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/e51cbb68c27c4c56b0873925bf4026f8 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/e51cbb68c27c4c56b0873925bf4026f8 2024-11-20T13:23:56,248 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/25bdf48243044d658edbb40871d54325 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/25bdf48243044d658edbb40871d54325 2024-11-20T13:23:56,250 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/8c3832b7ea2f4f9d8031174e55c48c99 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/8c3832b7ea2f4f9d8031174e55c48c99 2024-11-20T13:23:56,252 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/3d9e3b74336c4e2b80e9958d8df7e585 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/3d9e3b74336c4e2b80e9958d8df7e585 2024-11-20T13:23:56,254 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/5d8e40db79cb42f3a0ec905e41461032 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/5d8e40db79cb42f3a0ec905e41461032 2024-11-20T13:23:56,256 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/8639171a3c44490a85446780405190df to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/8639171a3c44490a85446780405190df 2024-11-20T13:23:56,258 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/34dbae8a39e24b5e9c11d48af22897ee to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/34dbae8a39e24b5e9c11d48af22897ee 2024-11-20T13:23:56,259 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/7dea75d1ab0e4728958680521c2c609e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/7dea75d1ab0e4728958680521c2c609e 2024-11-20T13:23:56,261 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/21fd71dc20ee4861a6135acfdaae15d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/21fd71dc20ee4861a6135acfdaae15d3 2024-11-20T13:23:56,263 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/d95fd5cfb6534ec5b25346b703232d42 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/d95fd5cfb6534ec5b25346b703232d42 2024-11-20T13:23:56,265 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/2508f000121a4a80ab2ecb7f0d137a9c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/2508f000121a4a80ab2ecb7f0d137a9c 2024-11-20T13:23:56,269 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/2d794ae4ba67451a815e931f31ef6dbe to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/2d794ae4ba67451a815e931f31ef6dbe 2024-11-20T13:23:56,273 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/36b9efd59b654e72bde0c383b2c4ffc4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/36b9efd59b654e72bde0c383b2c4ffc4 2024-11-20T13:23:56,280 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/1da3caca4c8a419da2308d485e505cb0 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/1da3caca4c8a419da2308d485e505cb0 2024-11-20T13:23:56,283 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/d2f4e4c2843a44b1b9bca6cd90e58eb7 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/d2f4e4c2843a44b1b9bca6cd90e58eb7 2024-11-20T13:23:56,285 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/4ae8a61abc574f56bb0a20d1d2d2d982 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/4ae8a61abc574f56bb0a20d1d2d2d982 2024-11-20T13:23:56,287 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/946e46f3e2074e1dac7c362aa931157d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/946e46f3e2074e1dac7c362aa931157d 2024-11-20T13:23:56,301 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/b4fadd121df1494a92f57a22b2b7400b to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/b4fadd121df1494a92f57a22b2b7400b 2024-11-20T13:23:56,304 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/33edafb043d44a0687efc229ea5250e2 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/33edafb043d44a0687efc229ea5250e2 2024-11-20T13:23:56,306 DEBUG [StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/365f121bfde548f0a9ed9c3adb82c4e0 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/365f121bfde548f0a9ed9c3adb82c4e0 2024-11-20T13:23:56,308 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/45973f01fb8f46ea90e8a4d11a9d01f0 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/45973f01fb8f46ea90e8a4d11a9d01f0 2024-11-20T13:23:56,348 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/recovered.edits/569.seqid, newMaxSeqId=569, maxSeqId=1 2024-11-20T13:23:56,361 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae. 2024-11-20T13:23:56,361 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1635): Region close journal for cbbdc72320da06253b5398d0c51c77ae: 2024-11-20T13:23:56,365 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=cbbdc72320da06253b5398d0c51c77ae, regionState=CLOSED 2024-11-20T13:23:56,365 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(170): Closed cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:56,384 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-20T13:23:56,384 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; CloseRegionProcedure cbbdc72320da06253b5398d0c51c77ae, server=5ef453f0fbb6,46739,1732109006137 in 2.3290 sec 2024-11-20T13:23:56,386 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-11-20T13:23:56,386 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=cbbdc72320da06253b5398d0c51c77ae, UNASSIGN in 2.3590 sec 2024-11-20T13:23:56,392 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109036391"}]},"ts":"1732109036391"} 2024-11-20T13:23:56,392 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-11-20T13:23:56,392 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.3710 sec 2024-11-20T13:23:56,397 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T13:23:56,402 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T13:23:56,404 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.4250 sec 2024-11-20T13:23:58,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=38 2024-11-20T13:23:58,135 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 38 completed 2024-11-20T13:23:58,139 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T13:23:58,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=42, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:23:58,145 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=42, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:23:58,147 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=42, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:23:58,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=42 2024-11-20T13:23:58,156 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:58,162 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A, FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B, FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C, FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/recovered.edits] 2024-11-20T13:23:58,168 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/0b4e39e715ce437693ff484d706ab29a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/0b4e39e715ce437693ff484d706ab29a 2024-11-20T13:23:58,174 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/8679e59a7c9b4f21815a3a280b75e4f2 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/8679e59a7c9b4f21815a3a280b75e4f2 2024-11-20T13:23:58,176 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/a1c63bec1aa54c41b81b4288b010c1d2 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/A/a1c63bec1aa54c41b81b4288b010c1d2 2024-11-20T13:23:58,179 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/74b8333869b74a4eb9ab13748dd00128 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/74b8333869b74a4eb9ab13748dd00128 2024-11-20T13:23:58,181 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/9430d8f5550c4ac18cc52660ecbefc26 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/9430d8f5550c4ac18cc52660ecbefc26 2024-11-20T13:23:58,184 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/ac1d6c1743a64431ade925d128ecd3a4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/B/ac1d6c1743a64431ade925d128ecd3a4 2024-11-20T13:23:58,187 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/9ef0d3a7da314ba6938c62449f1de73f to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/9ef0d3a7da314ba6938c62449f1de73f 2024-11-20T13:23:58,189 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/a2b286258ea1483894f8d8d21150512c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/a2b286258ea1483894f8d8d21150512c 2024-11-20T13:23:58,191 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/f9f5826e13fc4c1886d9d50578b076d5 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/C/f9f5826e13fc4c1886d9d50578b076d5 2024-11-20T13:23:58,196 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/recovered.edits/569.seqid to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae/recovered.edits/569.seqid 
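[Editorial note] The HFileArchiver records above show each store file being moved from the region's data/ directory to the parallel path under archive/ before the region directory itself is removed. The snippet below is a minimal, hypothetical sketch of that kind of rename-into-archive move using the Hadoop FileSystem API; it is not the HFileArchiver implementation, and the class name, helper, and paths are illustrative assumptions only.

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import java.io.IOException;

    public class ArchiveMoveSketch {
      // Move one store file from <root>/data/... to the matching <root>/archive/data/... path,
      // creating the target directory first. Illustrative only; real archiving also handles
      // name collisions and retries.
      static boolean archiveStoreFile(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
        String rootPath = rootDir.toUri().getPath();
        // e.g. "data/default/TestAcidGuarantees/<region>/C/<hfile>" relative to the root dir
        String relative = storeFile.toUri().getPath().substring(rootPath.length() + 1);
        Path archived = new Path(new Path(rootDir, "archive"), relative);
        fs.mkdirs(archived.getParent());        // ensure archive/<table>/<region>/<cf> exists
        return fs.rename(storeFile, archived);  // a single metadata move on HDFS
      }
    }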
2024-11-20T13:23:58,197 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/cbbdc72320da06253b5398d0c51c77ae 2024-11-20T13:23:58,197 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T13:23:58,205 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=42, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:23:58,213 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-20T13:23:58,217 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T13:23:58,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=42 2024-11-20T13:23:58,268 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T13:23:58,270 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=42, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:23:58,270 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T13:23:58,270 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732109038270"}]},"ts":"9223372036854775807"} 2024-11-20T13:23:58,276 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T13:23:58,276 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => cbbdc72320da06253b5398d0c51c77ae, NAME => 'TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T13:23:58,276 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
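[Editorial note] The records above and below trace the master-side DisableTableProcedure (pid=38) and DeleteTableProcedure (pid=42) that run when the test harness drops TestAcidGuarantees. As a hedged illustration, the sketch below shows the client-side Admin calls that would drive this sequence; it assumes a standard HBase 2.x client API and is not taken from the test's own code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(table)) {
            if (admin.isTableEnabled(table)) {
              admin.disableTable(table);  // master runs a DisableTableProcedure (cf. pid=38 above)
            }
            admin.deleteTable(table);     // master runs a DeleteTableProcedure (cf. pid=42 above)
          }
        }
      }
    }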
2024-11-20T13:23:58,276 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732109038276"}]},"ts":"9223372036854775807"} 2024-11-20T13:23:58,282 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T13:23:58,285 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=42, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:23:58,286 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 146 msec 2024-11-20T13:23:58,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=42 2024-11-20T13:23:58,452 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 42 completed 2024-11-20T13:23:58,470 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=239 (was 218) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x47fac14f-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1373242094_22 at /127.0.0.1:45864 [Waiting for operation #263] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: hconnection-0x47fac14f-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x47fac14f-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1357025633_22 at /127.0.0.1:51112 [Waiting for operation #282] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x47fac14f-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1373242094_22 at /127.0.0.1:45990 [Waiting for operation #224] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;5ef453f0fbb6:46739-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=460 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1156 (was 993) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=884 (was 2412) 2024-11-20T13:23:58,484 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=239, OpenFileDescriptor=460, MaxFileDescriptor=1048576, SystemLoadAverage=1156, ProcessCount=11, AvailableMemoryMB=883 2024-11-20T13:23:58,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
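Editor's note: the TableDescriptorChecker WARN above fires because the test table sets its memstore flush size to 131072 bytes (128 KB), far below the 128 MB default, deliberately forcing very frequent flushes. The fragment below is only an illustrative sketch of how such a per-table flush size could be expressed with the HBase 2.x client API; it is not taken from the test source, and only the table name and the 131072 value come from the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallFlushSizeSketch {
    public static void main(String[] args) {
        // Sketch only: 131072 bytes (128 KB) is the value the checker warns
        // about above; the stock default is 128 MB.
        TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setMemStoreFlushSize(131072L)
                .build();
        System.out.println(td);
    }
}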
2024-11-20T13:23:58,487 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T13:23:58,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T13:23:58,490 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=43, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T13:23:58,490 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:58,490 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 43 2024-11-20T13:23:58,491 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=43, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T13:23:58,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-20T13:23:58,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741962_1138 (size=963) 2024-11-20T13:23:58,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-20T13:23:58,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-20T13:23:58,902 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc 2024-11-20T13:23:58,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741963_1139 (size=53) 2024-11-20T13:23:59,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-20T13:23:59,309 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:23:59,309 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing bb1c53ade43f12e473cc15132f34b609, disabling compactions & flushes 2024-11-20T13:23:59,309 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:23:59,309 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:23:59,309 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. after waiting 0 ms 2024-11-20T13:23:59,309 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:23:59,310 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
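Editor's note: the create request logged at 13:23:58,487 builds a three-family table (A, B, C) whose table metadata carries 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'. The following is a hedged sketch of how an equivalent descriptor could be assembled with the HBase 2.x client API; it mirrors the attributes visible in the log (one version per cell, 64 KB block size, the ADAPTIVE attribute) but is not the code used by TestAcidGuaranteesWithAdaptivePolicy.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableDescriptorBuilder tdb = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    // Table-level attribute seen in the log entry above.
                    .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
            for (String family : new String[] {"A", "B", "C"}) {
                tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes(family))
                        .setMaxVersions(1)    // VERSIONS => '1'
                        .setBlocksize(65536)  // BLOCKSIZE => '65536'
                        .build());
            }
            // Issues the CreateTableProcedure (pid=43 in this run).
            admin.createTable(tdb.build());
        }
    }
}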
2024-11-20T13:23:59,310 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:23:59,311 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=43, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T13:23:59,311 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732109039311"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732109039311"}]},"ts":"1732109039311"} 2024-11-20T13:23:59,316 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T13:23:59,317 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=43, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T13:23:59,317 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109039317"}]},"ts":"1732109039317"} 2024-11-20T13:23:59,323 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T13:23:59,327 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bb1c53ade43f12e473cc15132f34b609, ASSIGN}] 2024-11-20T13:23:59,330 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bb1c53ade43f12e473cc15132f34b609, ASSIGN 2024-11-20T13:23:59,330 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=bb1c53ade43f12e473cc15132f34b609, ASSIGN; state=OFFLINE, location=5ef453f0fbb6,46739,1732109006137; forceNewPlan=false, retain=false 2024-11-20T13:23:59,481 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=bb1c53ade43f12e473cc15132f34b609, regionState=OPENING, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:59,483 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE; OpenRegionProcedure bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137}] 2024-11-20T13:23:59,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-20T13:23:59,636 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:59,640 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:23:59,641 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7285): Opening region: {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} 2024-11-20T13:23:59,641 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:23:59,641 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:23:59,641 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7327): checking encryption for bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:23:59,641 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7330): checking classloading for bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:23:59,643 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:23:59,644 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:23:59,645 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bb1c53ade43f12e473cc15132f34b609 columnFamilyName A 2024-11-20T13:23:59,645 DEBUG [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:59,645 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] regionserver.HStore(327): Store=bb1c53ade43f12e473cc15132f34b609/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:23:59,646 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:23:59,647 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:23:59,647 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bb1c53ade43f12e473cc15132f34b609 columnFamilyName B 2024-11-20T13:23:59,648 DEBUG [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:59,648 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] regionserver.HStore(327): Store=bb1c53ade43f12e473cc15132f34b609/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:23:59,648 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:23:59,649 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:23:59,650 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bb1c53ade43f12e473cc15132f34b609 columnFamilyName C 2024-11-20T13:23:59,650 DEBUG [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:23:59,650 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] regionserver.HStore(327): Store=bb1c53ade43f12e473cc15132f34b609/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:23:59,651 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:23:59,651 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:23:59,652 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:23:59,653 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T13:23:59,655 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1085): writing seq id for bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:23:59,657 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T13:23:59,658 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1102): Opened bb1c53ade43f12e473cc15132f34b609; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61276058, jitterRate=-0.08691558241844177}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T13:23:59,659 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1001): Region open journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:23:59,659 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., pid=45, masterSystemTime=1732109039636 2024-11-20T13:23:59,661 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:23:59,661 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
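Editor's note: each store above opens as a CompactingMemStore with compactor=ADAPTIVE because of the table-level attribute. HBase also exposes the same choice per column family through MemoryCompactionPolicy; the fragment below is a hedged illustration of that per-family form, not code from this test.

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AdaptiveMemStoreSketch {
    public static void main(String[] args) {
        // Per-family equivalent of the table-wide
        // 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' attribute.
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("A"))
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .build();
        System.out.println(cf);
    }
}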
2024-11-20T13:23:59,662 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=bb1c53ade43f12e473cc15132f34b609, regionState=OPEN, openSeqNum=2, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:23:59,665 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=44 2024-11-20T13:23:59,665 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=44, state=SUCCESS; OpenRegionProcedure bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 in 180 msec 2024-11-20T13:23:59,667 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-11-20T13:23:59,667 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=bb1c53ade43f12e473cc15132f34b609, ASSIGN in 338 msec 2024-11-20T13:23:59,667 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=43, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T13:23:59,668 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109039667"}]},"ts":"1732109039667"} 2024-11-20T13:23:59,669 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T13:23:59,673 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=43, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T13:23:59,674 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1860 sec 2024-11-20T13:24:00,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-20T13:24:00,600 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 43 completed 2024-11-20T13:24:00,601 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x509dd4f9 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7246da78 2024-11-20T13:24:00,605 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c9f2f86, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:00,607 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:00,609 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36992, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:00,611 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T13:24:00,613 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60796, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T13:24:00,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T13:24:00,618 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T13:24:00,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=46, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T13:24:00,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741964_1140 (size=999) 2024-11-20T13:24:01,054 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-20T13:24:01,054 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-20T13:24:01,068 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T13:24:01,082 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bb1c53ade43f12e473cc15132f34b609, REOPEN/MOVE}] 2024-11-20T13:24:01,086 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=47, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bb1c53ade43f12e473cc15132f34b609, REOPEN/MOVE 2024-11-20T13:24:01,086 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=48 updating hbase:meta row=bb1c53ade43f12e473cc15132f34b609, regionState=CLOSING, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:01,088 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T13:24:01,088 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=49, ppid=48, state=RUNNABLE; CloseRegionProcedure bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137}] 2024-11-20T13:24:01,240 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:01,241 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(124): Close bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:01,241 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T13:24:01,241 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1681): Closing bb1c53ade43f12e473cc15132f34b609, disabling compactions & flushes 2024-11-20T13:24:01,241 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:01,241 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:01,241 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. after waiting 0 ms 2024-11-20T13:24:01,241 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
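Editor's note: the modify request logged at 13:24:00,618 turns family A into a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'), which is what makes this the testMobMixedAtomicity variant; the close/reopen of bb1c53ade43f12e473cc15132f34b609 that follows is the region picking up the new schema. Below is a hedged sketch of an equivalent client-side change; the table name, family name, and threshold come from the log, everything else is assumed.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobSketch {
    public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableDescriptor current = admin.getDescriptor(table);
            // Rebuild family 'A' with MOB enabled and a 4-byte threshold,
            // matching IS_MOB => 'true', MOB_THRESHOLD => '4' in the log.
            ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder
                    .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                    .setMobEnabled(true)
                    .setMobThreshold(4L)
                    .build();
            // A schema change like this triggers a table modification and a
            // region reopen, as seen in the surrounding log.
            admin.modifyColumnFamily(table, mobA);
        }
    }
}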
2024-11-20T13:24:01,245 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T13:24:01,246 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:01,246 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1635): Region close journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:01,246 WARN [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegionServer(3786): Not adding moved region record: bb1c53ade43f12e473cc15132f34b609 to self. 2024-11-20T13:24:01,248 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(170): Closed bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:01,248 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=48 updating hbase:meta row=bb1c53ade43f12e473cc15132f34b609, regionState=CLOSED 2024-11-20T13:24:01,251 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=49, resume processing ppid=48 2024-11-20T13:24:01,251 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, ppid=48, state=SUCCESS; CloseRegionProcedure bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 in 162 msec 2024-11-20T13:24:01,251 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=48, ppid=47, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=bb1c53ade43f12e473cc15132f34b609, REOPEN/MOVE; state=CLOSED, location=5ef453f0fbb6,46739,1732109006137; forceNewPlan=false, retain=true 2024-11-20T13:24:01,402 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=48 updating hbase:meta row=bb1c53ade43f12e473cc15132f34b609, regionState=OPENING, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:01,404 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=48, state=RUNNABLE; OpenRegionProcedure bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137}] 2024-11-20T13:24:01,556 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:01,560 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:01,560 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegion(7285): Opening region: {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} 2024-11-20T13:24:01,560 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:01,561 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:24:01,561 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegion(7327): checking encryption for bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:01,561 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegion(7330): checking classloading for bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:01,563 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:01,564 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:24:01,568 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bb1c53ade43f12e473cc15132f34b609 columnFamilyName A 2024-11-20T13:24:01,570 DEBUG [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:01,571 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] regionserver.HStore(327): Store=bb1c53ade43f12e473cc15132f34b609/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:24:01,572 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:01,572 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:24:01,573 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bb1c53ade43f12e473cc15132f34b609 columnFamilyName B 2024-11-20T13:24:01,573 DEBUG [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:01,573 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] regionserver.HStore(327): Store=bb1c53ade43f12e473cc15132f34b609/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:24:01,573 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:01,574 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:24:01,574 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bb1c53ade43f12e473cc15132f34b609 columnFamilyName C 2024-11-20T13:24:01,574 DEBUG [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:01,575 INFO [StoreOpener-bb1c53ade43f12e473cc15132f34b609-1 {}] regionserver.HStore(327): Store=bb1c53ade43f12e473cc15132f34b609/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:24:01,575 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:01,575 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:01,576 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:01,578 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T13:24:01,579 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegion(1085): writing seq id for bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:01,581 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegion(1102): Opened bb1c53ade43f12e473cc15132f34b609; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58822594, jitterRate=-0.12347504496574402}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T13:24:01,581 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegion(1001): Region open journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:01,582 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., pid=50, masterSystemTime=1732109041556 2024-11-20T13:24:01,584 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:01,584 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:01,584 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=48 updating hbase:meta row=bb1c53ade43f12e473cc15132f34b609, regionState=OPEN, openSeqNum=5, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:01,588 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=48 2024-11-20T13:24:01,588 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=48, state=SUCCESS; OpenRegionProcedure bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 in 183 msec 2024-11-20T13:24:01,592 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-11-20T13:24:01,592 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=bb1c53ade43f12e473cc15132f34b609, REOPEN/MOVE in 506 msec 2024-11-20T13:24:01,594 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=47, resume processing ppid=46 2024-11-20T13:24:01,594 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 526 msec 2024-11-20T13:24:01,597 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 975 msec 2024-11-20T13:24:01,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-11-20T13:24:01,604 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x21c6f06d to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5944672 2024-11-20T13:24:01,619 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2bbc6c2a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:01,620 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61722a7c to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@216edc96 2024-11-20T13:24:01,627 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75bd74b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:01,629 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6d90494b to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4b31b770 2024-11-20T13:24:01,635 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a0cb54c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:01,637 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a57be6e to 
127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1bf433a9 2024-11-20T13:24:01,643 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f210c07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:01,645 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x554f4590 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7edb89e1 2024-11-20T13:24:01,652 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@799df309, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:01,654 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51c9d640 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@475a72cb 2024-11-20T13:24:01,663 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a4d3c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:01,665 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12d65163 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68ea5d8 2024-11-20T13:24:01,669 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@541f4b98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:01,671 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e20f1cc to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1df339b5 2024-11-20T13:24:01,679 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@339ef218, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:01,681 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7bc461df to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7f7ed183 2024-11-20T13:24:01,687 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bb4e246, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:01,691 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:24:01,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-20T13:24:01,692 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:24:01,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T13:24:01,693 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:24:01,693 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:24:01,700 DEBUG [hconnection-0x219c5acc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:01,700 DEBUG [hconnection-0x3f1760b5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:01,702 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37004, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:01,708 DEBUG [hconnection-0x2262414-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:01,717 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37010, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:01,730 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37024, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:01,756 DEBUG [hconnection-0x38f49b5c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:01,758 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37026, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:01,765 DEBUG [hconnection-0x2f38586a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:01,766 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37042, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:01,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:01,788 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb1c53ade43f12e473cc15132f34b609 3/3 column families, dataSize=53.67 KB 
heapSize=141.38 KB 2024-11-20T13:24:01,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=A 2024-11-20T13:24:01,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:01,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=B 2024-11-20T13:24:01,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:01,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=C 2024-11-20T13:24:01,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:01,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T13:24:01,832 DEBUG [hconnection-0x72cde130-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:01,836 DEBUG [hconnection-0x6180ffbe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:01,836 DEBUG [hconnection-0x20a8fd53-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:01,839 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37068, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:01,839 DEBUG [hconnection-0x5d23d85f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:01,840 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37072, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:01,840 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37054, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:01,844 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37078, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:01,844 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:01,848 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T13:24:01,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:01,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
as already flushing 2024-11-20T13:24:01,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:01,849 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:01,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:01,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:01,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:01,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109101864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:01,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:01,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37072 deadline: 1732109101866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:01,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:01,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109101866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:01,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:01,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109101871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:01,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:01,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109101873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:01,881 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209779dc3a44e84dda9428ce837df485c3_bb1c53ade43f12e473cc15132f34b609 is 50, key is test_row_0/A:col10/1732109041786/Put/seqid=0 2024-11-20T13:24:01,894 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T13:24:01,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741965_1141 (size=12154) 2024-11-20T13:24:01,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:01,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37072 deadline: 1732109101977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:01,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:01,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109101977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:01,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:01,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109101977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:01,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:01,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109101981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:01,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:01,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109101981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:01,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T13:24:02,004 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:02,004 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T13:24:02,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:02,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:02,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:02,005 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:02,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:02,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:02,158 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:02,159 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T13:24:02,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:02,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:02,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:02,159 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:02,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:02,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:02,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:02,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37072 deadline: 1732109102182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:02,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:02,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109102182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:02,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:02,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109102187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:02,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:02,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109102189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:02,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:02,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109102189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:02,258 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T13:24:02,260 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60808, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T13:24:02,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T13:24:02,313 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:02,314 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T13:24:02,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:02,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:02,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
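The repeated RegionTooBusyException warnings above are the region server rejecting writes while the memstore for bb1c53ade43f12e473cc15132f34b609 is above its blocking threshold (512.0 K here) and MemStoreFlusher.0 is still draining it; in parallel, the FlushRegionProcedure (pid=52) keeps failing with "Unable to complete flush ... as already flushing" and is redispatched by the master. The blocking threshold is governed by the configured memstore flush size together with hbase.hregion.memstore.block.multiplier, and the client normally retries these rejections on its own (hbase.client.retries.number, hbase.client.pause). The sketch below shows an additional application-level backoff around a single Put, assuming the exception surfaces to the caller directly; the row key test_row_0 and column A:col10 are taken from the flush messages in this log, while the cell value and the retry/backoff numbers are illustrative only.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryOnBusyRegion {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);          // rejected while the memstore is over its blocking limit
              break;
            } catch (RegionTooBusyException e) {
              Thread.sleep(backoffMs); // back off until the flush has drained the memstore
              backoffMs *= 2;
            }
          }
        }
      }
    }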
2024-11-20T13:24:02,315 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:02,315 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:02,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:02,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:02,321 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209779dc3a44e84dda9428ce837df485c3_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209779dc3a44e84dda9428ce837df485c3_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:02,322 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/5212d0a138c1416599c0a07a0f36d8ec, store: [table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:02,342 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/5212d0a138c1416599c0a07a0f36d8ec is 175, key is test_row_0/A:col10/1732109041786/Put/seqid=0 2024-11-20T13:24:02,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741966_1142 (size=30955) 2024-11-20T13:24:02,359 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/5212d0a138c1416599c0a07a0f36d8ec 2024-11-20T13:24:02,421 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/1f2042f4c2fe44a6a0ee8a6047b56b7a is 50, key is test_row_0/B:col10/1732109041786/Put/seqid=0 2024-11-20T13:24:02,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741967_1143 (size=12001) 2024-11-20T13:24:02,467 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:02,468 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T13:24:02,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:02,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:02,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:02,468 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:02,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:02,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:02,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:02,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109102501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:02,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:02,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109102501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:02,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:02,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109102501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:02,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:02,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37072 deadline: 1732109102501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:02,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:02,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109102501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:02,621 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:02,622 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T13:24:02,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:02,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:02,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:02,623 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
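Note: the 512.0 K figure in these warnings is the memstore blocking limit, i.e. the per-region flush threshold (hbase.hregion.memstore.flush.size) multiplied by hbase.hregion.memstore.block.multiplier; once a region's memstore grows past it, writes are rejected with RegionTooBusyException until a flush brings it back down. The exact settings used by this test run are not visible in this excerpt, so the values in the sketch below are assumptions chosen only to reproduce a 512 K limit (128 K times the default multiplier of 4).

// Sketch of how a blocking limit like the 512.0 K above can arise. The property
// names are real HBase settings; the specific values are assumptions, not taken
// from this test's configuration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush threshold (default is 128 MB; a tiny value forces frequent flushes).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Writes are blocked once the memstore reaches flush.size * block.multiplier (default 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit = " + blockingLimit + " bytes"); // 524288 = 512.0 K
  }
}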
2024-11-20T13:24:02,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:02,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:02,776 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:02,776 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T13:24:02,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:02,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:02,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:02,777 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:02,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:02,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:02,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T13:24:02,852 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/1f2042f4c2fe44a6a0ee8a6047b56b7a 2024-11-20T13:24:02,913 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/45284dcc76864d94ad428872f890456a is 50, key is test_row_0/C:col10/1732109041786/Put/seqid=0 2024-11-20T13:24:02,929 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:02,930 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T13:24:02,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:02,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:02,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:02,931 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
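Note: the repeated pid=52 entries show the master re-dispatching FlushRegionCallable while the region reports "NOT flushing ... as already flushing": the MemStoreFlusher-initiated flush is still writing the A/B/C store files, so each remote attempt ends in "Unable to complete flush", is reported back as a RemoteProcedureException, and the master appears to retry until the flush at sequenceid=15 lands. For context, the sketch below shows the usual client-side entry point that starts such a flush procedure; in this run the flush is presumably driven by the test harness itself, so the call is illustrative only.

// Minimal sketch of the client-side call that starts a flush procedure like pid=52.
// Connection setup is an assumption; the table name comes from the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush request to the master; region servers then execute
      // FlushRegionCallable for each region of the table, as in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}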
2024-11-20T13:24:02,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:02,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:02,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741968_1144 (size=12001) 2024-11-20T13:24:02,965 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/45284dcc76864d94ad428872f890456a 2024-11-20T13:24:02,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/5212d0a138c1416599c0a07a0f36d8ec as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/5212d0a138c1416599c0a07a0f36d8ec 2024-11-20T13:24:02,980 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/5212d0a138c1416599c0a07a0f36d8ec, entries=150, sequenceid=15, filesize=30.2 K 2024-11-20T13:24:02,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/1f2042f4c2fe44a6a0ee8a6047b56b7a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/1f2042f4c2fe44a6a0ee8a6047b56b7a 2024-11-20T13:24:02,988 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/1f2042f4c2fe44a6a0ee8a6047b56b7a, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T13:24:02,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/45284dcc76864d94ad428872f890456a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/45284dcc76864d94ad428872f890456a 2024-11-20T13:24:03,000 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/45284dcc76864d94ad428872f890456a, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T13:24:03,003 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for bb1c53ade43f12e473cc15132f34b609 in 1214ms, sequenceid=15, compaction requested=false 2024-11-20T13:24:03,003 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new 
MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T13:24:03,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:03,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:03,036 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb1c53ade43f12e473cc15132f34b609 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-11-20T13:24:03,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=A 2024-11-20T13:24:03,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:03,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=B 2024-11-20T13:24:03,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:03,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=C 2024-11-20T13:24:03,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:03,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:03,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109103032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:03,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37072 deadline: 1732109103035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:03,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109103040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:03,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109103042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:03,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109103043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,085 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,085 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T13:24:03,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:03,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:03,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:03,086 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:03,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:03,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:03,100 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fba122482ca74759b30b8ee40d6de440_bb1c53ade43f12e473cc15132f34b609 is 50, key is test_row_0/A:col10/1732109043035/Put/seqid=0 2024-11-20T13:24:03,145 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:03,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109103144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:03,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37072 deadline: 1732109103145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:03,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109103151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:03,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:03,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109103156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109103155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741970_1146 (size=29238) 2024-11-20T13:24:03,178 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,183 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fba122482ca74759b30b8ee40d6de440_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fba122482ca74759b30b8ee40d6de440_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:03,186 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/56ff408a3eb34472aa10d74ef3985902, store: [table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:03,187 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/56ff408a3eb34472aa10d74ef3985902 is 175, key is test_row_0/A:col10/1732109043035/Put/seqid=0 2024-11-20T13:24:03,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741969_1145 (size=91179) 2024-11-20T13:24:03,240 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,241 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure 
class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T13:24:03,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:03,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:03,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:03,241 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:03,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:03,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:03,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:03,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109103348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:03,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109103353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:03,369 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:03,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109103361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37072 deadline: 1732109103350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:03,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109103362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,394 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,395 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T13:24:03,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:03,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:03,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:03,396 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:03,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:03,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:03,559 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,560 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T13:24:03,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:03,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:03,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:03,560 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:03,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:03,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:03,592 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=47, memsize=64.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/56ff408a3eb34472aa10d74ef3985902 2024-11-20T13:24:03,628 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/51c9d383682c492fa8f3e0ebb01e9ee8 is 50, key is test_row_0/B:col10/1732109043035/Put/seqid=0 2024-11-20T13:24:03,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:03,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109103666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,676 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:03,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109103674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:03,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37072 deadline: 1732109103678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741971_1147 (size=12001) 2024-11-20T13:24:03,684 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/51c9d383682c492fa8f3e0ebb01e9ee8 2024-11-20T13:24:03,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:03,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109103684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:24:03,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109103688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:24:03,712 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/c0bba01cc4cb4537a826ecd66d6566b4 is 50, key is test_row_0/C:col10/1732109043035/Put/seqid=0
2024-11-20T13:24:03,715 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137
2024-11-20T13:24:03,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52
2024-11-20T13:24:03,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.
2024-11-20T13:24:03,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing
2024-11-20T13:24:03,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.
2024-11-20T13:24:03,716 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52
java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:24:03,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52
java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:24:03,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=52
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:24:03,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741972_1148 (size=12001)
2024-11-20T13:24:03,757 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/c0bba01cc4cb4537a826ecd66d6566b4
2024-11-20T13:24:03,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/56ff408a3eb34472aa10d74ef3985902 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/56ff408a3eb34472aa10d74ef3985902
2024-11-20T13:24:03,786 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/56ff408a3eb34472aa10d74ef3985902, entries=500, sequenceid=47, filesize=89.0 K
2024-11-20T13:24:03,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51
2024-11-20T13:24:03,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/51c9d383682c492fa8f3e0ebb01e9ee8 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/51c9d383682c492fa8f3e0ebb01e9ee8
2024-11-20T13:24:03,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:24:03,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:24:03,818 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/51c9d383682c492fa8f3e0ebb01e9ee8, entries=150, sequenceid=47, filesize=11.7 K
2024-11-20T13:24:03,819 DEBUG [MemStoreFlusher.0
{}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/c0bba01cc4cb4537a826ecd66d6566b4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/c0bba01cc4cb4537a826ecd66d6566b4 2024-11-20T13:24:03,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,840 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/c0bba01cc4cb4537a826ecd66d6566b4, entries=150, sequenceid=47, filesize=11.7 K 2024-11-20T13:24:03,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,843 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~194.56 KB/199230, heapSize ~510.47 KB/522720, currentSize=13.42 KB/13740 for bb1c53ade43f12e473cc15132f34b609 in 812ms, sequenceid=47, compaction requested=false 2024-11-20T13:24:03,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:03,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,871 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:03,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T13:24:03,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:03,872 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing bb1c53ade43f12e473cc15132f34b609 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-11-20T13:24:03,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=A 2024-11-20T13:24:03,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:03,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=B 2024-11-20T13:24:03,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:03,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=C 2024-11-20T13:24:03,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:03,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a298e74f6ece49ee9aadb9e3256b47ed_bb1c53ade43f12e473cc15132f34b609 is 50, key is test_row_1/A:col10/1732109043040/Put/seqid=0 2024-11-20T13:24:03,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:24:03,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:24:03,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:24:03,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:24:03,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741973_1149 (size=9714) 2024-11-20T13:24:03,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:03,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:04,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
as already flushing 2024-11-20T13:24:04,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:04,331 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:04,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109104319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:04,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37072 deadline: 1732109104321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:04,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:04,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109104328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:04,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:04,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109104331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:04,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:04,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109104331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:04,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:04,349 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a298e74f6ece49ee9aadb9e3256b47ed_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a298e74f6ece49ee9aadb9e3256b47ed_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:04,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/67f1812bf7c34800807de799425297b6, store: [table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:04,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/67f1812bf7c34800807de799425297b6 is 175, key is test_row_1/A:col10/1732109043040/Put/seqid=0 2024-11-20T13:24:04,382 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741974_1150 (size=22361) 2024-11-20T13:24:04,383 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=52, memsize=4.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/67f1812bf7c34800807de799425297b6 2024-11-20T13:24:04,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/5b67c324c30342359a0de04b293bc8dd is 50, key is test_row_1/B:col10/1732109043040/Put/seqid=0 2024-11-20T13:24:04,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741975_1151 (size=9657) 2024-11-20T13:24:04,408 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/5b67c324c30342359a0de04b293bc8dd 2024-11-20T13:24:04,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/a8e50c0753274b8787c5a4d30f3f15d4 is 50, key is test_row_1/C:col10/1732109043040/Put/seqid=0 2024-11-20T13:24:04,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:04,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37072 deadline: 1732109104436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:04,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:04,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109104437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:04,439 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:04,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109104438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:04,442 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:04,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109104438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:04,442 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:04,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109104439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:04,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741976_1152 (size=9657) 2024-11-20T13:24:04,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:04,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37072 deadline: 1732109104639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:04,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:04,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109104642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:04,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:04,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109104643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:04,648 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:04,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109104645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:04,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:04,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109104645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:04,905 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/a8e50c0753274b8787c5a4d30f3f15d4 2024-11-20T13:24:04,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/67f1812bf7c34800807de799425297b6 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/67f1812bf7c34800807de799425297b6 2024-11-20T13:24:04,921 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/67f1812bf7c34800807de799425297b6, entries=100, sequenceid=52, filesize=21.8 K 2024-11-20T13:24:04,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/5b67c324c30342359a0de04b293bc8dd as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/5b67c324c30342359a0de04b293bc8dd 2024-11-20T13:24:04,929 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/5b67c324c30342359a0de04b293bc8dd, entries=100, sequenceid=52, filesize=9.4 K 2024-11-20T13:24:04,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/a8e50c0753274b8787c5a4d30f3f15d4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/a8e50c0753274b8787c5a4d30f3f15d4 2024-11-20T13:24:04,938 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/a8e50c0753274b8787c5a4d30f3f15d4, entries=100, sequenceid=52, filesize=9.4 K 2024-11-20T13:24:04,941 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=194.56 KB/199230 for bb1c53ade43f12e473cc15132f34b609 in 1069ms, sequenceid=52, compaction requested=true 2024-11-20T13:24:04,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:04,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
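
The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources rejecting writes while the flush recorded here is still draining the region's memstore. The blocking limit is the per-region flush size multiplied by the block multiplier. Below is a minimal configuration sketch using the stock HBase property names; the values shown are the shipping defaults, not the deliberately small 512 K limit this test appears to run with.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        // Writes to a region are rejected with RegionTooBusyException once its
        // memstore exceeds flush.size * block.multiplier; the values below are the
        // stock defaults, not the small test-only limit (512 K) seen in this log.
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024); // default 128 MB
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);             // default 4
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
        System.out.println("puts blocked above ~" + blockingLimit + " bytes per region");
    }
}

With the defaults a region only starts rejecting puts at roughly 512 MB of memstore, so the 512 K threshold in this log is presumably a test-only setting chosen to force the blocking path.
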
2024-11-20T13:24:04,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-20T13:24:04,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-20T13:24:04,946 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-20T13:24:04,946 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.2510 sec 2024-11-20T13:24:04,949 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 3.2560 sec 2024-11-20T13:24:04,952 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb1c53ade43f12e473cc15132f34b609 3/3 column families, dataSize=201.27 KB heapSize=528.09 KB 2024-11-20T13:24:04,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=A 2024-11-20T13:24:04,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:04,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=B 2024-11-20T13:24:04,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:04,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=C 2024-11-20T13:24:04,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:04,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:04,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:04,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109104948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:04,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:04,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109104952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:04,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:04,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109104953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:04,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:04,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37072 deadline: 1732109104954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:04,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:04,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109104966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:05,001 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120575ab2b54c4d4526a43fff8a990a7743_bb1c53ade43f12e473cc15132f34b609 is 50, key is test_row_0/A:col10/1732109044317/Put/seqid=0 2024-11-20T13:24:05,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741977_1153 (size=14594) 2024-11-20T13:24:05,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:05,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37072 deadline: 1732109105068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:05,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:05,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37072 deadline: 1732109105274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:05,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:05,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109105458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:05,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:05,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109105460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:05,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:05,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109105461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:05,469 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:05,475 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120575ab2b54c4d4526a43fff8a990a7743_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120575ab2b54c4d4526a43fff8a990a7743_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:05,477 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/bae77b3a9a6b4e0f8fbcb704e3af427a, store: [table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:05,478 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/bae77b3a9a6b4e0f8fbcb704e3af427a is 175, key is test_row_0/A:col10/1732109044317/Put/seqid=0 2024-11-20T13:24:05,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:05,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109105482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:05,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741978_1154 (size=39549) 2024-11-20T13:24:05,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:05,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37072 deadline: 1732109105581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:05,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T13:24:05,803 INFO [Thread-692 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-20T13:24:05,806 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:24:05,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-20T13:24:05,808 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:24:05,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T13:24:05,809 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:24:05,810 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:24:05,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T13:24:05,948 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=85, memsize=67.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/bae77b3a9a6b4e0f8fbcb704e3af427a 2024-11-20T13:24:05,964 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:05,965 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 
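
The pid=53 FlushTableProcedure above is the server-side handling of an administrative flush request ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"): the master prepares the procedure, fans out a FlushRegionProcedure subprocedure (pid=54) per region, and dispatches it to the hosting region server. On the client side this corresponds to a single Admin call; a sketch assuming the standard HBase client API, not the test's actual harness code:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        // Issues the admin request that the master records as
        // "Client=... flush TestAcidGuarantees" and executes as a FlushTableProcedure.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
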
2024-11-20T13:24:05,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:05,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:05,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:05,965 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:05,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:05,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
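
The Mutate RPCs being rejected throughout this stretch of the log (methodName: Mutate, rows like test_row_0, families A/B/C) are ordinary client puts that keep failing until the in-flight flush frees memstore space. A hedged sketch of how such a writer might back off; the row, family, and qualifier names are taken from the log, while the retry loop itself is illustrative and not the test's actual client logic:

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriterSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; attempt <= 10; attempt++) {
                try {
                    table.put(put);
                    break; // accepted once the in-flight flush frees memstore space
                } catch (IOException e) {
                    // Typically a RegionTooBusyException, possibly wrapped by the
                    // client's own retry layer; back off briefly and try again.
                    Thread.sleep(100L * attempt);
                }
            }
        }
    }
}
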
2024-11-20T13:24:06,034 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/a9d3fbd3dfc84eb883c64ee074355ebd is 50, key is test_row_0/B:col10/1732109044317/Put/seqid=0 2024-11-20T13:24:06,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741979_1155 (size=12001) 2024-11-20T13:24:06,070 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.09 KB at sequenceid=85 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/a9d3fbd3dfc84eb883c64ee074355ebd 2024-11-20T13:24:06,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:06,090 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/d6dd816ac85e4d13baf6cfdc60e2e5f4 is 50, key is test_row_0/C:col10/1732109044317/Put/seqid=0 2024-11-20T13:24:06,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37072 deadline: 1732109106088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:06,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T13:24:06,127 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:06,127 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T13:24:06,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:06,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:06,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:06,128 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:06,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:06,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:06,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741980_1156 (size=12001) 2024-11-20T13:24:06,132 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.09 KB at sequenceid=85 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/d6dd816ac85e4d13baf6cfdc60e2e5f4 2024-11-20T13:24:06,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/bae77b3a9a6b4e0f8fbcb704e3af427a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/bae77b3a9a6b4e0f8fbcb704e3af427a 2024-11-20T13:24:06,144 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/bae77b3a9a6b4e0f8fbcb704e3af427a, entries=200, sequenceid=85, filesize=38.6 K 2024-11-20T13:24:06,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/a9d3fbd3dfc84eb883c64ee074355ebd as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/a9d3fbd3dfc84eb883c64ee074355ebd 2024-11-20T13:24:06,152 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/a9d3fbd3dfc84eb883c64ee074355ebd, entries=150, sequenceid=85, filesize=11.7 K 2024-11-20T13:24:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/d6dd816ac85e4d13baf6cfdc60e2e5f4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/d6dd816ac85e4d13baf6cfdc60e2e5f4 2024-11-20T13:24:06,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,178 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/d6dd816ac85e4d13baf6cfdc60e2e5f4, entries=150, sequenceid=85, filesize=11.7 K 2024-11-20T13:24:06,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,180 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~201.27 KB/206100, heapSize ~528.05 KB/540720, currentSize=0 B/0 for bb1c53ade43f12e473cc15132f34b609 in 1229ms, sequenceid=85, compaction requested=true 2024-11-20T13:24:06,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:06,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb1c53ade43f12e473cc15132f34b609:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:24:06,182 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:24:06,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:06,182 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:24:06,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb1c53ade43f12e473cc15132f34b609:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:24:06,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:06,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb1c53ade43f12e473cc15132f34b609:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:24:06,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:06,188 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 184044 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:24:06,188 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/A is initiating minor compaction (all files) 2024-11-20T13:24:06,188 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/A in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:06,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,188 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/5212d0a138c1416599c0a07a0f36d8ec, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/56ff408a3eb34472aa10d74ef3985902, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/67f1812bf7c34800807de799425297b6, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/bae77b3a9a6b4e0f8fbcb704e3af427a] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=179.7 K 2024-11-20T13:24:06,188 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A 
region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:06,188 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/5212d0a138c1416599c0a07a0f36d8ec, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/56ff408a3eb34472aa10d74ef3985902, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/67f1812bf7c34800807de799425297b6, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/bae77b3a9a6b4e0f8fbcb704e3af427a] 2024-11-20T13:24:06,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,189 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:24:06,189 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/B is initiating minor compaction (all files) 2024-11-20T13:24:06,189 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/B in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:06,189 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/1f2042f4c2fe44a6a0ee8a6047b56b7a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/51c9d383682c492fa8f3e0ebb01e9ee8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/5b67c324c30342359a0de04b293bc8dd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/a9d3fbd3dfc84eb883c64ee074355ebd] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=44.6 K 2024-11-20T13:24:06,189 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f2042f4c2fe44a6a0ee8a6047b56b7a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732109041775 2024-11-20T13:24:06,190 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5212d0a138c1416599c0a07a0f36d8ec, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732109041775 2024-11-20T13:24:06,190 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 51c9d383682c492fa8f3e0ebb01e9ee8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732109041863 2024-11-20T13:24:06,190 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56ff408a3eb34472aa10d74ef3985902, keycount=500, bloomtype=ROW, size=89.0 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732109041860 2024-11-20T13:24:06,191 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b67c324c30342359a0de04b293bc8dd, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732109043040 2024-11-20T13:24:06,191 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 67f1812bf7c34800807de799425297b6, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732109043040 2024-11-20T13:24:06,191 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting a9d3fbd3dfc84eb883c64ee074355ebd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1732109044317 2024-11-20T13:24:06,192 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting bae77b3a9a6b4e0f8fbcb704e3af427a, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1732109044317 2024-11-20T13:24:06,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,221 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#B#compaction#135 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:06,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,222 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/56eb81d9cd524f0898dd99d22c16cbb1 is 50, key is test_row_0/B:col10/1732109044317/Put/seqid=0 2024-11-20T13:24:06,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,233 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:06,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,247 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120b85a5f084268465ab1dbcef129e7c1b0_bb1c53ade43f12e473cc15132f34b609 store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:06,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,252 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120b85a5f084268465ab1dbcef129e7c1b0_bb1c53ade43f12e473cc15132f34b609, 
store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:06,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,252 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b85a5f084268465ab1dbcef129e7c1b0_bb1c53ade43f12e473cc15132f34b609 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:06,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,280 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:06,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,281 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T13:24:06,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:06,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:06,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:06,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-20T13:24:06,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-20T13:24:06,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,287 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-20T13:24:06,287 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 476 msec 2024-11-20T13:24:06,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741981_1157 (size=12139) 2024-11-20T13:24:06,289 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 482 msec 2024-11-20T13:24:06,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,318 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/56eb81d9cd524f0898dd99d22c16cbb1 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/56eb81d9cd524f0898dd99d22c16cbb1 2024-11-20T13:24:06,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,324 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/B of bb1c53ade43f12e473cc15132f34b609 into 56eb81d9cd524f0898dd99d22c16cbb1(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:24:06,324 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:06,324 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/B, priority=12, startTime=1732109046182; duration=0sec 2024-11-20T13:24:06,325 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:06,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,325 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:B 2024-11-20T13:24:06,325 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:24:06,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,326 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:24:06,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,326 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/C is initiating minor compaction (all files) 2024-11-20T13:24:06,326 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/C in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
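The entries above show the server side of a compaction: SortedCompactionPolicy/ExploringCompactionPolicy selecting 4 eligible files for store C of bb1c53ade43f12e473cc15132f34b609 and HStore starting a minor compaction. As a hedged sketch (standard HBase client API; the table and family names are taken from the log, everything else is illustrative), a client can request the equivalent compaction explicitly, while file selection still happens on the region server exactly as logged here.

// Sketch: request a compaction of the "C" family of TestAcidGuarantees via the Admin API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the region server to compact one column family; the compaction policy
      // (ExploringCompactionPolicy above) still decides which store files are merged.
      admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("C"));
    }
  }
}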
2024-11-20T13:24:06,326 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/45284dcc76864d94ad428872f890456a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/c0bba01cc4cb4537a826ecd66d6566b4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/a8e50c0753274b8787c5a4d30f3f15d4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/d6dd816ac85e4d13baf6cfdc60e2e5f4] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=44.6 K 2024-11-20T13:24:06,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,327 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 45284dcc76864d94ad428872f890456a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732109041775 2024-11-20T13:24:06,327 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting c0bba01cc4cb4537a826ecd66d6566b4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732109041863 2024-11-20T13:24:06,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,328 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting a8e50c0753274b8787c5a4d30f3f15d4, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732109043040 2024-11-20T13:24:06,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,328 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting d6dd816ac85e4d13baf6cfdc60e2e5f4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1732109044317 2024-11-20T13:24:06,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741982_1158 (size=4469) 2024-11-20T13:24:06,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,349 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#A#compaction#136 average throughput is 0.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:06,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,354 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#C#compaction#137 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:06,355 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/393e83c501fc4fb188c1d8a8ee1e5145 is 50, key is test_row_0/C:col10/1732109044317/Put/seqid=0 2024-11-20T13:24:06,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,357 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/6f9ccaadd26a4f468c10534e7123287c is 175, key is test_row_0/A:col10/1732109044317/Put/seqid=0 2024-11-20T13:24:06,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741983_1159 (size=12139) 2024-11-20T13:24:06,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:24:06,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741984_1160 (size=31093) 2024-11-20T13:24:06,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T13:24:06,422 INFO [Thread-692 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 
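The "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed" entry above is the client-side acknowledgement of the FlushTableProcedure the master ran as pid=53 (and pid=55 is submitted immediately after). As a minimal sketch using the standard HBase client API (table name from the log, the rest assumed for illustration), the call that produces this sequence looks like the following.

// Sketch: client-side flush request corresponding to the FLUSH operation logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a FlushTableProcedure on the master and waits for it to finish,
      // which drives the FLUSH_TABLE_PREPARE / FLUSH_TABLE_FLUSH_REGIONS states seen in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}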
2024-11-20T13:24:06,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,425 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:24:06,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-20T13:24:06,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,427 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,427 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:24:06,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,428 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:24:06,428 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:24:06,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T13:24:06,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:06,528 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb1c53ade43f12e473cc15132f34b609 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T13:24:06,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T13:24:06,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=A 2024-11-20T13:24:06,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:06,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=B 2024-11-20T13:24:06,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:06,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=C 2024-11-20T13:24:06,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:06,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:24:06,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:24:06,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:24:06,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:24:06,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,581 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:06,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T13:24:06,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:06,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
as already flushing 2024-11-20T13:24:06,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:06,582 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:06,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:06,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:06,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,586 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207f6698f8c92344e79c9308f62046359d_bb1c53ade43f12e473cc15132f34b609 is 50, key is test_row_0/A:col10/1732109046521/Put/seqid=0 2024-11-20T13:24:06,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:06,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:06,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109106637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:06,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741986_1162 (size=26798) 2024-11-20T13:24:06,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:06,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109106650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:06,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:06,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109106651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:06,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:06,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109106651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:06,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T13:24:06,735 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:06,736 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T13:24:06,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:06,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:06,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:06,736 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:06,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:06,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:06,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:06,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109106753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:06,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:06,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109106768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:06,772 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:06,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109106769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:06,772 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:06,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109106769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:06,848 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/393e83c501fc4fb188c1d8a8ee1e5145 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/393e83c501fc4fb188c1d8a8ee1e5145 2024-11-20T13:24:06,852 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/6f9ccaadd26a4f468c10534e7123287c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/6f9ccaadd26a4f468c10534e7123287c 2024-11-20T13:24:06,859 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/C of bb1c53ade43f12e473cc15132f34b609 into 393e83c501fc4fb188c1d8a8ee1e5145(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:06,859 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:06,859 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/C, priority=12, startTime=1732109046184; duration=0sec 2024-11-20T13:24:06,859 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:06,859 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:C 2024-11-20T13:24:06,863 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/A of bb1c53ade43f12e473cc15132f34b609 into 6f9ccaadd26a4f468c10534e7123287c(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:24:06,863 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:06,863 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/A, priority=12, startTime=1732109046182; duration=0sec 2024-11-20T13:24:06,866 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:06,866 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:A 2024-11-20T13:24:06,890 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:06,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T13:24:06,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:06,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:06,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:06,891 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:06,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:06,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:06,961 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:06,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109106957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:06,980 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:06,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109106976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:06,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:06,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109106976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:06,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:06,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109106976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:07,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T13:24:07,044 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:07,045 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T13:24:07,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:07,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:07,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:07,046 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:07,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:07,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:07,063 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T13:24:07,064 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:07,090 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207f6698f8c92344e79c9308f62046359d_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207f6698f8c92344e79c9308f62046359d_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:07,097 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/96fd4d9ac58e47098c2b69d86ec9786e, store: [table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:07,099 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/96fd4d9ac58e47098c2b69d86ec9786e is 175, key is test_row_0/A:col10/1732109046521/Put/seqid=0 2024-11-20T13:24:07,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:07,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37072 deadline: 1732109107097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:07,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741985_1161 (size=82585) 2024-11-20T13:24:07,105 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=99, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/96fd4d9ac58e47098c2b69d86ec9786e 2024-11-20T13:24:07,147 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/15ceb5620a204449836074b89be37d61 is 50, key is test_row_0/B:col10/1732109046521/Put/seqid=0 2024-11-20T13:24:07,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741987_1163 (size=12001) 2024-11-20T13:24:07,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/15ceb5620a204449836074b89be37d61 2024-11-20T13:24:07,201 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:07,202 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T13:24:07,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:07,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:07,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:07,203 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:07,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:07,203 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/f3cfab69a7b840aeba9d7d3437eca4bc is 50, key is test_row_0/C:col10/1732109046521/Put/seqid=0 2024-11-20T13:24:07,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:07,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:07,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109107263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:07,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741988_1164 (size=12001) 2024-11-20T13:24:07,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:07,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109107289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:07,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:07,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109107296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:07,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:07,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109107311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:07,360 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:07,360 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T13:24:07,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:07,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:07,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:07,361 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:07,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:07,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:07,514 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:07,515 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T13:24:07,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:07,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:07,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:07,515 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:07,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:07,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:07,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T13:24:07,668 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:07,668 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T13:24:07,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:07,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:07,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:07,670 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:07,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:07,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:07,681 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/f3cfab69a7b840aeba9d7d3437eca4bc 2024-11-20T13:24:07,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/96fd4d9ac58e47098c2b69d86ec9786e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/96fd4d9ac58e47098c2b69d86ec9786e 2024-11-20T13:24:07,696 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/96fd4d9ac58e47098c2b69d86ec9786e, entries=450, sequenceid=99, filesize=80.6 K 2024-11-20T13:24:07,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/15ceb5620a204449836074b89be37d61 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/15ceb5620a204449836074b89be37d61 2024-11-20T13:24:07,710 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/15ceb5620a204449836074b89be37d61, entries=150, sequenceid=99, filesize=11.7 K 2024-11-20T13:24:07,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/f3cfab69a7b840aeba9d7d3437eca4bc as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/f3cfab69a7b840aeba9d7d3437eca4bc 2024-11-20T13:24:07,722 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/f3cfab69a7b840aeba9d7d3437eca4bc, entries=150, sequenceid=99, filesize=11.7 K 2024-11-20T13:24:07,728 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for bb1c53ade43f12e473cc15132f34b609 in 1200ms, sequenceid=99, compaction requested=false 2024-11-20T13:24:07,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:07,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:07,783 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb1c53ade43f12e473cc15132f34b609 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 
2024-11-20T13:24:07,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=A 2024-11-20T13:24:07,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:07,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=B 2024-11-20T13:24:07,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:07,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=C 2024-11-20T13:24:07,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:07,823 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:07,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T13:24:07,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:07,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:07,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:07,826 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:07,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:07,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:07,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:07,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109107816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:07,831 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:07,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109107825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:07,831 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:07,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109107826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:07,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:07,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109107817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:07,840 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120656a2192815f4746ae095632c3d91abf_bb1c53ade43f12e473cc15132f34b609 is 50, key is test_row_0/A:col10/1732109046647/Put/seqid=0 2024-11-20T13:24:07,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741989_1165 (size=14594) 2024-11-20T13:24:07,899 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:07,906 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120656a2192815f4746ae095632c3d91abf_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120656a2192815f4746ae095632c3d91abf_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:07,908 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/08b69f80e3484befbad9ff922828339f, store: [table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:07,908 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/08b69f80e3484befbad9ff922828339f is 175, key is test_row_0/A:col10/1732109046647/Put/seqid=0 2024-11-20T13:24:07,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): 
Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:07,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109107931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:07,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:07,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109107941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:07,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:07,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109107964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:07,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741990_1166 (size=39549) 2024-11-20T13:24:07,971 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=128, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/08b69f80e3484befbad9ff922828339f 2024-11-20T13:24:07,979 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:07,979 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T13:24:07,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:07,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:07,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:07,980 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:07,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:07,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:07,992 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/ddf22ec5b0604a11ab49e3ddb14cd6db is 50, key is test_row_0/B:col10/1732109046647/Put/seqid=0 2024-11-20T13:24:08,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741991_1167 (size=12001) 2024-11-20T13:24:08,133 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:08,134 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T13:24:08,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:08,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:08,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:08,134 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:08,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:08,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:08,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:08,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109108137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:08,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109108146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:08,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:08,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109108182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:08,294 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:08,295 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T13:24:08,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:08,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:08,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:08,295 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:08,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:08,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:08,447 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/ddf22ec5b0604a11ab49e3ddb14cd6db 2024-11-20T13:24:08,447 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:08,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109108446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:08,448 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:08,448 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T13:24:08,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:08,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:08,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:08,449 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:08,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:08,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:08,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:08,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109108453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:08,526 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/e2737301045444be96b9fc3504854d63 is 50, key is test_row_0/C:col10/1732109046647/Put/seqid=0 2024-11-20T13:24:08,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:08,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109108533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:08,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T13:24:08,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741992_1168 (size=12001) 2024-11-20T13:24:08,603 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:08,607 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/e2737301045444be96b9fc3504854d63 2024-11-20T13:24:08,613 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T13:24:08,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:08,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:08,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:08,614 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:08,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:08,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:08,616 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/08b69f80e3484befbad9ff922828339f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/08b69f80e3484befbad9ff922828339f 2024-11-20T13:24:08,621 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/08b69f80e3484befbad9ff922828339f, entries=200, sequenceid=128, filesize=38.6 K 2024-11-20T13:24:08,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/ddf22ec5b0604a11ab49e3ddb14cd6db as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/ddf22ec5b0604a11ab49e3ddb14cd6db 2024-11-20T13:24:08,629 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/ddf22ec5b0604a11ab49e3ddb14cd6db, entries=150, sequenceid=128, filesize=11.7 K 2024-11-20T13:24:08,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/e2737301045444be96b9fc3504854d63 as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/e2737301045444be96b9fc3504854d63 2024-11-20T13:24:08,640 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/e2737301045444be96b9fc3504854d63, entries=150, sequenceid=128, filesize=11.7 K 2024-11-20T13:24:08,641 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for bb1c53ade43f12e473cc15132f34b609 in 858ms, sequenceid=128, compaction requested=true 2024-11-20T13:24:08,642 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:08,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb1c53ade43f12e473cc15132f34b609:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:24:08,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:08,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb1c53ade43f12e473cc15132f34b609:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:24:08,642 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:08,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:08,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb1c53ade43f12e473cc15132f34b609:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:24:08,642 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:08,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:08,644 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:08,644 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 153227 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:08,646 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/A is initiating minor compaction (all files) 2024-11-20T13:24:08,646 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/B is initiating minor compaction (all files) 2024-11-20T13:24:08,646 INFO 
[RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/A in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:08,646 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/B in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:08,646 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/56eb81d9cd524f0898dd99d22c16cbb1, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/15ceb5620a204449836074b89be37d61, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/ddf22ec5b0604a11ab49e3ddb14cd6db] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=35.3 K 2024-11-20T13:24:08,646 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/6f9ccaadd26a4f468c10534e7123287c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/96fd4d9ac58e47098c2b69d86ec9786e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/08b69f80e3484befbad9ff922828339f] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=149.6 K 2024-11-20T13:24:08,647 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:08,647 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/6f9ccaadd26a4f468c10534e7123287c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/96fd4d9ac58e47098c2b69d86ec9786e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/08b69f80e3484befbad9ff922828339f] 2024-11-20T13:24:08,647 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 56eb81d9cd524f0898dd99d22c16cbb1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1732109044317 2024-11-20T13:24:08,648 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f9ccaadd26a4f468c10534e7123287c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1732109044317 2024-11-20T13:24:08,648 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 15ceb5620a204449836074b89be37d61, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1732109046504 2024-11-20T13:24:08,649 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96fd4d9ac58e47098c2b69d86ec9786e, keycount=450, bloomtype=ROW, size=80.6 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1732109046493 2024-11-20T13:24:08,650 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting ddf22ec5b0604a11ab49e3ddb14cd6db, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732109046630 2024-11-20T13:24:08,650 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08b69f80e3484befbad9ff922828339f, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732109046630 2024-11-20T13:24:08,682 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#B#compaction#144 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:08,683 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/f9daf1715e1d4db9876e0a277492a860 is 50, key is test_row_0/B:col10/1732109046647/Put/seqid=0 2024-11-20T13:24:08,698 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:08,751 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411204ecb2c21be164a3cb939337cd4bbb191_bb1c53ade43f12e473cc15132f34b609 store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:08,754 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411204ecb2c21be164a3cb939337cd4bbb191_bb1c53ade43f12e473cc15132f34b609, store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:08,754 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204ecb2c21be164a3cb939337cd4bbb191_bb1c53ade43f12e473cc15132f34b609 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:08,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741993_1169 (size=12241) 2024-11-20T13:24:08,766 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:08,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T13:24:08,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:08,767 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing bb1c53ade43f12e473cc15132f34b609 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-20T13:24:08,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=A 2024-11-20T13:24:08,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:08,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=B 2024-11-20T13:24:08,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:08,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=C 2024-11-20T13:24:08,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:08,772 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/f9daf1715e1d4db9876e0a277492a860 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/f9daf1715e1d4db9876e0a277492a860 2024-11-20T13:24:08,780 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/B of bb1c53ade43f12e473cc15132f34b609 into f9daf1715e1d4db9876e0a277492a860(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:08,780 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:08,780 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/B, priority=13, startTime=1732109048642; duration=0sec 2024-11-20T13:24:08,780 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:08,780 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:B 2024-11-20T13:24:08,780 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:08,782 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:08,782 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/C is initiating minor compaction (all files) 2024-11-20T13:24:08,782 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/C in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:08,782 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/393e83c501fc4fb188c1d8a8ee1e5145, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/f3cfab69a7b840aeba9d7d3437eca4bc, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/e2737301045444be96b9fc3504854d63] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=35.3 K 2024-11-20T13:24:08,783 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 393e83c501fc4fb188c1d8a8ee1e5145, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1732109044317 2024-11-20T13:24:08,787 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting f3cfab69a7b840aeba9d7d3437eca4bc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1732109046504 2024-11-20T13:24:08,788 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting e2737301045444be96b9fc3504854d63, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732109046630 2024-11-20T13:24:08,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is 
added to blk_1073741994_1170 (size=4469) 2024-11-20T13:24:08,832 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#A#compaction#145 average throughput is 0.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:08,833 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/93693d780742415bb3a382cb78a5df77 is 175, key is test_row_0/A:col10/1732109046647/Put/seqid=0 2024-11-20T13:24:08,852 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#C#compaction#146 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:08,853 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/cc17287eaf8c48e9b2cd8721037ab1a0 is 50, key is test_row_0/C:col10/1732109046647/Put/seqid=0 2024-11-20T13:24:08,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:08,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:08,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112004aa1bcc6ca14ac8b5f20e356dffa139_bb1c53ade43f12e473cc15132f34b609 is 50, key is test_row_0/A:col10/1732109047814/Put/seqid=0 2024-11-20T13:24:08,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741995_1171 (size=31195) 2024-11-20T13:24:08,940 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/93693d780742415bb3a382cb78a5df77 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/93693d780742415bb3a382cb78a5df77 2024-11-20T13:24:08,952 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/A of bb1c53ade43f12e473cc15132f34b609 into 93693d780742415bb3a382cb78a5df77(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:08,952 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:08,952 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/A, priority=13, startTime=1732109048642; duration=0sec 2024-11-20T13:24:08,952 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:08,952 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:A 2024-11-20T13:24:08,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741996_1172 (size=12241) 2024-11-20T13:24:08,967 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/cc17287eaf8c48e9b2cd8721037ab1a0 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/cc17287eaf8c48e9b2cd8721037ab1a0 2024-11-20T13:24:08,974 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/C of bb1c53ade43f12e473cc15132f34b609 into cc17287eaf8c48e9b2cd8721037ab1a0(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:24:08,974 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:08,974 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/C, priority=13, startTime=1732109048642; duration=0sec 2024-11-20T13:24:08,974 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:08,974 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:C 2024-11-20T13:24:09,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741997_1173 (size=12304) 2024-11-20T13:24:09,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:09,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109109030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:09,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:09,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109109028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:09,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:09,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109109041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:09,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:09,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109109041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:09,121 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:09,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37072 deadline: 1732109109114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:09,122 DEBUG [Thread-684 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4169 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:24:09,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:09,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109109142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:09,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:09,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109109146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:09,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:09,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109109157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:09,351 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:09,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109109349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:09,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:09,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109109350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:09,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:09,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109109361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:09,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:09,411 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112004aa1bcc6ca14ac8b5f20e356dffa139_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112004aa1bcc6ca14ac8b5f20e356dffa139_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:09,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/263e769281a145b09771bf9f6d415bf1, store: [table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:09,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/263e769281a145b09771bf9f6d415bf1 is 175, key is test_row_0/A:col10/1732109047814/Put/seqid=0 2024-11-20T13:24:09,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741998_1174 (size=31105) 2024-11-20T13:24:09,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:09,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109109654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:09,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:09,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109109669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:09,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:09,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109109669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:09,888 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=137, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/263e769281a145b09771bf9f6d415bf1 2024-11-20T13:24:09,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/28145851c292481889972c3b2d7427ea is 50, key is test_row_0/B:col10/1732109047814/Put/seqid=0 2024-11-20T13:24:09,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741999_1175 (size=12151) 2024-11-20T13:24:09,963 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/28145851c292481889972c3b2d7427ea 2024-11-20T13:24:09,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/6c3d9f7e285d41c0abdabda54720c350 is 50, key is test_row_0/C:col10/1732109047814/Put/seqid=0 2024-11-20T13:24:10,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742000_1176 (size=12151) 2024-11-20T13:24:10,032 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/6c3d9f7e285d41c0abdabda54720c350 2024-11-20T13:24:10,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/263e769281a145b09771bf9f6d415bf1 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/263e769281a145b09771bf9f6d415bf1 2024-11-20T13:24:10,049 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/263e769281a145b09771bf9f6d415bf1, entries=150, sequenceid=137, filesize=30.4 K 2024-11-20T13:24:10,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/28145851c292481889972c3b2d7427ea as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/28145851c292481889972c3b2d7427ea 2024-11-20T13:24:10,063 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/28145851c292481889972c3b2d7427ea, entries=150, sequenceid=137, filesize=11.9 K 2024-11-20T13:24:10,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:10,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109110054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:10,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/6c3d9f7e285d41c0abdabda54720c350 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/6c3d9f7e285d41c0abdabda54720c350 2024-11-20T13:24:10,075 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/6c3d9f7e285d41c0abdabda54720c350, entries=150, sequenceid=137, filesize=11.9 K 2024-11-20T13:24:10,094 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for bb1c53ade43f12e473cc15132f34b609 in 1327ms, sequenceid=137, compaction requested=false 2024-11-20T13:24:10,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:10,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:10,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-20T13:24:10,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-20T13:24:10,100 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-20T13:24:10,100 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.6700 sec 2024-11-20T13:24:10,106 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 3.6770 sec 2024-11-20T13:24:10,172 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb1c53ade43f12e473cc15132f34b609 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-20T13:24:10,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=A 2024-11-20T13:24:10,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:10,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=B 2024-11-20T13:24:10,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:10,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=C 2024-11-20T13:24:10,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:10,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:10,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:10,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109110198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:10,211 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112001cd2cb43a04407cb994cffc440bd2a9_bb1c53ade43f12e473cc15132f34b609 is 50, key is test_row_0/A:col10/1732109049028/Put/seqid=0 2024-11-20T13:24:10,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:10,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109110208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:10,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:10,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109110227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:10,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742001_1177 (size=12304) 2024-11-20T13:24:10,248 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:10,255 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112001cd2cb43a04407cb994cffc440bd2a9_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112001cd2cb43a04407cb994cffc440bd2a9_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:10,257 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/72ba206b1ba14500887d13f607f97606, store: [table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:10,257 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/72ba206b1ba14500887d13f607f97606 is 175, key is test_row_0/A:col10/1732109049028/Put/seqid=0 2024-11-20T13:24:10,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is 
added to blk_1073742002_1178 (size=31105) 2024-11-20T13:24:10,302 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=169, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/72ba206b1ba14500887d13f607f97606 2024-11-20T13:24:10,321 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:10,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109110316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:10,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:10,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109110316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:10,339 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:10,339 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/87501b3a48a64cb08eb15dfe1a6875a2 is 50, key is test_row_0/B:col10/1732109049028/Put/seqid=0 2024-11-20T13:24:10,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109110333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:10,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742003_1179 (size=12151) 2024-11-20T13:24:10,407 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/87501b3a48a64cb08eb15dfe1a6875a2 2024-11-20T13:24:10,421 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/8705ae741b734b788a49a2082d34ae7c is 50, key is test_row_0/C:col10/1732109049028/Put/seqid=0 2024-11-20T13:24:10,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742004_1180 (size=12151) 2024-11-20T13:24:10,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:10,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109110524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:10,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:10,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109110524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:10,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T13:24:10,538 INFO [Thread-692 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-20T13:24:10,540 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:24:10,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-20T13:24:10,542 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:24:10,542 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:24:10,543 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:24:10,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T13:24:10,554 INFO [master/5ef453f0fbb6:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-20T13:24:10,555 INFO [master/5ef453f0fbb6:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-20T13:24:10,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:10,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109110558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:10,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T13:24:10,695 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:10,696 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T13:24:10,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:10,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:10,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:10,698 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:10,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:10,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:10,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:10,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109110839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:10,845 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:10,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109110840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:10,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T13:24:10,856 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:10,856 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T13:24:10,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:10,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:10,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:10,857 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:10,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:10,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:10,872 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/8705ae741b734b788a49a2082d34ae7c 2024-11-20T13:24:10,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:10,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109110873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:10,890 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/72ba206b1ba14500887d13f607f97606 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/72ba206b1ba14500887d13f607f97606 2024-11-20T13:24:10,901 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/72ba206b1ba14500887d13f607f97606, entries=150, sequenceid=169, filesize=30.4 K 2024-11-20T13:24:10,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/87501b3a48a64cb08eb15dfe1a6875a2 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/87501b3a48a64cb08eb15dfe1a6875a2 2024-11-20T13:24:10,910 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/87501b3a48a64cb08eb15dfe1a6875a2, entries=150, sequenceid=169, filesize=11.9 K 2024-11-20T13:24:10,911 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/8705ae741b734b788a49a2082d34ae7c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8705ae741b734b788a49a2082d34ae7c 2024-11-20T13:24:10,922 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8705ae741b734b788a49a2082d34ae7c, entries=150, sequenceid=169, filesize=11.9 K 2024-11-20T13:24:10,923 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for bb1c53ade43f12e473cc15132f34b609 in 751ms, sequenceid=169, compaction requested=true 2024-11-20T13:24:10,923 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:10,924 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:10,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb1c53ade43f12e473cc15132f34b609:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:24:10,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:10,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb1c53ade43f12e473cc15132f34b609:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:24:10,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:10,924 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:10,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb1c53ade43f12e473cc15132f34b609:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:24:10,925 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:10,927 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93405 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:10,927 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/A is initiating minor compaction (all files) 2024-11-20T13:24:10,927 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/A in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:10,927 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/93693d780742415bb3a382cb78a5df77, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/263e769281a145b09771bf9f6d415bf1, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/72ba206b1ba14500887d13f607f97606] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=91.2 K 2024-11-20T13:24:10,927 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:10,927 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/93693d780742415bb3a382cb78a5df77, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/263e769281a145b09771bf9f6d415bf1, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/72ba206b1ba14500887d13f607f97606] 2024-11-20T13:24:10,927 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36543 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:10,927 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93693d780742415bb3a382cb78a5df77, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732109046630 2024-11-20T13:24:10,928 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/B is initiating minor compaction (all files) 2024-11-20T13:24:10,928 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/B in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:10,928 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/f9daf1715e1d4db9876e0a277492a860, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/28145851c292481889972c3b2d7427ea, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/87501b3a48a64cb08eb15dfe1a6875a2] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=35.7 K 2024-11-20T13:24:10,928 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 263e769281a145b09771bf9f6d415bf1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732109047792 2024-11-20T13:24:10,928 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting f9daf1715e1d4db9876e0a277492a860, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732109046630 2024-11-20T13:24:10,929 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72ba206b1ba14500887d13f607f97606, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732109049013 2024-11-20T13:24:10,929 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 28145851c292481889972c3b2d7427ea, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732109047792 2024-11-20T13:24:10,930 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 
87501b3a48a64cb08eb15dfe1a6875a2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732109049013 2024-11-20T13:24:10,950 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#B#compaction#153 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:10,951 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/9e6294bf83c949c7baf563b1fe3b9c28 is 50, key is test_row_0/B:col10/1732109049028/Put/seqid=0 2024-11-20T13:24:10,971 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:10,996 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120aae763e1dab04ad18f2751679ce98ed9_bb1c53ade43f12e473cc15132f34b609 store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:10,998 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120aae763e1dab04ad18f2751679ce98ed9_bb1c53ade43f12e473cc15132f34b609, store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:10,999 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120aae763e1dab04ad18f2751679ce98ed9_bb1c53ade43f12e473cc15132f34b609 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:11,009 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:11,018 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T13:24:11,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:11,018 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing bb1c53ade43f12e473cc15132f34b609 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T13:24:11,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=A 2024-11-20T13:24:11,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:11,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=B 2024-11-20T13:24:11,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:11,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=C 2024-11-20T13:24:11,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:11,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742005_1181 (size=12493) 2024-11-20T13:24:11,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742006_1182 (size=4469) 2024-11-20T13:24:11,085 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#A#compaction#154 average throughput is 0.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:11,086 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/295d6df7ee79475e9a1caadc2b4e9d7a is 175, key is test_row_0/A:col10/1732109049028/Put/seqid=0 2024-11-20T13:24:11,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fd0e8cef9518479f9c7b58bb1f9cd3ca_bb1c53ade43f12e473cc15132f34b609 is 50, key is test_row_0/A:col10/1732109050206/Put/seqid=0 2024-11-20T13:24:11,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742007_1183 (size=31447) 2024-11-20T13:24:11,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T13:24:11,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742008_1184 (size=12304) 2024-11-20T13:24:11,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:11,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:11,443 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/9e6294bf83c949c7baf563b1fe3b9c28 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/9e6294bf83c949c7baf563b1fe3b9c28 2024-11-20T13:24:11,449 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/B of bb1c53ade43f12e473cc15132f34b609 into 9e6294bf83c949c7baf563b1fe3b9c28(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:11,450 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:11,450 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/B, priority=13, startTime=1732109050924; duration=0sec 2024-11-20T13:24:11,450 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:11,450 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:B 2024-11-20T13:24:11,450 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:11,451 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36543 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:11,451 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/C is initiating minor compaction (all files) 2024-11-20T13:24:11,452 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/C in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:11,452 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/cc17287eaf8c48e9b2cd8721037ab1a0, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/6c3d9f7e285d41c0abdabda54720c350, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8705ae741b734b788a49a2082d34ae7c] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=35.7 K 2024-11-20T13:24:11,452 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting cc17287eaf8c48e9b2cd8721037ab1a0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732109046630 2024-11-20T13:24:11,453 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c3d9f7e285d41c0abdabda54720c350, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732109047792 2024-11-20T13:24:11,453 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 8705ae741b734b788a49a2082d34ae7c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732109049013 2024-11-20T13:24:11,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:11,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109111440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:11,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:11,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109111442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:11,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:11,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109111461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:11,478 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#C#compaction#156 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:11,479 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/babe360a2e7242c28c06f7f684aff579 is 50, key is test_row_0/C:col10/1732109049028/Put/seqid=0 2024-11-20T13:24:11,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742009_1185 (size=12493) 2024-11-20T13:24:11,553 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/295d6df7ee79475e9a1caadc2b4e9d7a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/295d6df7ee79475e9a1caadc2b4e9d7a 2024-11-20T13:24:11,559 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/babe360a2e7242c28c06f7f684aff579 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/babe360a2e7242c28c06f7f684aff579 2024-11-20T13:24:11,561 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/A of bb1c53ade43f12e473cc15132f34b609 into 295d6df7ee79475e9a1caadc2b4e9d7a(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:24:11,561 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:11,561 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/A, priority=13, startTime=1732109050924; duration=0sec 2024-11-20T13:24:11,561 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:11,561 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:A 2024-11-20T13:24:11,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:11,569 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/C of bb1c53ade43f12e473cc15132f34b609 into babe360a2e7242c28c06f7f684aff579(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:11,570 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:11,570 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/C, priority=13, startTime=1732109050924; duration=0sec 2024-11-20T13:24:11,570 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:11,570 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:C 2024-11-20T13:24:11,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:11,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109111563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:11,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:11,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109111568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:11,572 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fd0e8cef9518479f9c7b58bb1f9cd3ca_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fd0e8cef9518479f9c7b58bb1f9cd3ca_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:11,573 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:11,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109111569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:11,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/52e206a5338943bdb6b1b3d5e78b75fe, store: [table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:11,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/52e206a5338943bdb6b1b3d5e78b75fe is 175, key is test_row_0/A:col10/1732109050206/Put/seqid=0 2024-11-20T13:24:11,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742010_1186 (size=31105) 2024-11-20T13:24:11,635 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=177, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/52e206a5338943bdb6b1b3d5e78b75fe 2024-11-20T13:24:11,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T13:24:11,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/a527268b231b4d0b8e47615259ce9adf is 50, key is test_row_0/B:col10/1732109050206/Put/seqid=0 2024-11-20T13:24:11,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742011_1187 (size=12151) 2024-11-20T13:24:11,744 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=177 (bloomFilter=true), 
to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/a527268b231b4d0b8e47615259ce9adf 2024-11-20T13:24:11,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/8a3a3440395a42dda8902aa4f97aa3a2 is 50, key is test_row_0/C:col10/1732109050206/Put/seqid=0 2024-11-20T13:24:11,778 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:11,778 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:11,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109111772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:11,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109111774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:11,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:11,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109111774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:11,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742012_1188 (size=12151) 2024-11-20T13:24:12,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:12,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109112078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:12,084 DEBUG [Thread-690 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4260 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:24:12,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:12,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109112085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:12,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:12,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109112086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:12,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:12,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109112086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:12,271 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/8a3a3440395a42dda8902aa4f97aa3a2 2024-11-20T13:24:12,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/52e206a5338943bdb6b1b3d5e78b75fe as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/52e206a5338943bdb6b1b3d5e78b75fe 2024-11-20T13:24:12,285 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/52e206a5338943bdb6b1b3d5e78b75fe, entries=150, sequenceid=177, filesize=30.4 K 2024-11-20T13:24:12,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/a527268b231b4d0b8e47615259ce9adf as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/a527268b231b4d0b8e47615259ce9adf 2024-11-20T13:24:12,294 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/a527268b231b4d0b8e47615259ce9adf, entries=150, sequenceid=177, filesize=11.9 K 2024-11-20T13:24:12,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/8a3a3440395a42dda8902aa4f97aa3a2 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8a3a3440395a42dda8902aa4f97aa3a2 2024-11-20T13:24:12,302 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8a3a3440395a42dda8902aa4f97aa3a2, entries=150, sequenceid=177, filesize=11.9 K 2024-11-20T13:24:12,304 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for bb1c53ade43f12e473cc15132f34b609 in 1285ms, sequenceid=177, compaction requested=false 2024-11-20T13:24:12,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:12,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:12,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-20T13:24:12,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-20T13:24:12,306 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-20T13:24:12,306 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7620 sec 2024-11-20T13:24:12,308 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.7670 sec 2024-11-20T13:24:12,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:12,597 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb1c53ade43f12e473cc15132f34b609 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-11-20T13:24:12,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=A 2024-11-20T13:24:12,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:12,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=B 2024-11-20T13:24:12,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:12,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=C 2024-11-20T13:24:12,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:12,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:12,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109112602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:12,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:12,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109112606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:12,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:12,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109112608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:12,623 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203f7b5e8217224783ba89d3db13e5478a_bb1c53ade43f12e473cc15132f34b609 is 50, key is test_row_0/A:col10/1732109052596/Put/seqid=0 2024-11-20T13:24:12,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T13:24:12,651 INFO [Thread-692 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-20T13:24:12,653 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:24:12,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-11-20T13:24:12,654 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:24:12,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T13:24:12,655 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:24:12,655 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:24:12,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is 
added to blk_1073742013_1189 (size=14794) 2024-11-20T13:24:12,713 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:12,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109112710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:12,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:12,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109112713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:12,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:12,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109112714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:12,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T13:24:12,808 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:12,809 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T13:24:12,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:12,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:12,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:12,809 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:12,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:12,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:12,916 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:12,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109112915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:12,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:12,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109112921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:12,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:12,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109112921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:12,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T13:24:12,962 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:12,963 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T13:24:12,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:12,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:12,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:12,963 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:12,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:12,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:13,064 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,070 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203f7b5e8217224783ba89d3db13e5478a_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203f7b5e8217224783ba89d3db13e5478a_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:13,071 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/5935fb252ea64a0eaacf7cd971fd1741, store: [table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:13,072 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/5935fb252ea64a0eaacf7cd971fd1741 is 175, key is test_row_0/A:col10/1732109052596/Put/seqid=0 2024-11-20T13:24:13,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742014_1190 (size=39749) 2024-11-20T13:24:13,111 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=211, memsize=62.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/5935fb252ea64a0eaacf7cd971fd1741 2024-11-20T13:24:13,116 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:13,116 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T13:24:13,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:13,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:13,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:13,117 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:13,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:13,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:13,148 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/0979c0beb64b4b8ab642113dc82a457f is 50, key is test_row_0/B:col10/1732109052596/Put/seqid=0 2024-11-20T13:24:13,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742015_1191 (size=12151) 2024-11-20T13:24:13,160 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/0979c0beb64b4b8ab642113dc82a457f 2024-11-20T13:24:13,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:13,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37072 deadline: 1732109113168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:13,175 DEBUG [Thread-684 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8221 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:24:13,188 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/8494a7f7d6f9437d856cef1960ae4d7d is 50, key is test_row_0/C:col10/1732109052596/Put/seqid=0 2024-11-20T13:24:13,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:13,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109113219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:13,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:13,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109113224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:13,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:13,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109113226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:13,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742016_1192 (size=12151) 2024-11-20T13:24:13,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T13:24:13,272 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:13,273 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T13:24:13,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:13,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:13,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:13,273 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:13,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:13,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:13,425 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:13,426 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T13:24:13,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:13,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:13,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:13,427 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:13,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:13,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:13,580 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:13,581 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T13:24:13,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:13,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:13,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:13,581 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:13,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:13,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:13,637 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/8494a7f7d6f9437d856cef1960ae4d7d 2024-11-20T13:24:13,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/5935fb252ea64a0eaacf7cd971fd1741 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/5935fb252ea64a0eaacf7cd971fd1741 2024-11-20T13:24:13,657 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/5935fb252ea64a0eaacf7cd971fd1741, entries=200, sequenceid=211, filesize=38.8 K 2024-11-20T13:24:13,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/0979c0beb64b4b8ab642113dc82a457f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/0979c0beb64b4b8ab642113dc82a457f 2024-11-20T13:24:13,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,666 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/0979c0beb64b4b8ab642113dc82a457f, entries=150, sequenceid=211, filesize=11.9 K 2024-11-20T13:24:13,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/8494a7f7d6f9437d856cef1960ae4d7d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8494a7f7d6f9437d856cef1960ae4d7d 2024-11-20T13:24:13,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,675 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8494a7f7d6f9437d856cef1960ae4d7d, entries=150, sequenceid=211, filesize=11.9 K 2024-11-20T13:24:13,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,676 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=13.42 KB/13740 for bb1c53ade43f12e473cc15132f34b609 in 1080ms, sequenceid=211, compaction requested=true 2024-11-20T13:24:13,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:13,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
bb1c53ade43f12e473cc15132f34b609:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:24:13,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:13,676 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:13,677 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:13,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb1c53ade43f12e473cc15132f34b609:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:24:13,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,678 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102301 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:13,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:13,678 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/A is initiating minor compaction (all files) 2024-11-20T13:24:13,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb1c53ade43f12e473cc15132f34b609:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:24:13,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:13,678 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/A in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:13,678 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/295d6df7ee79475e9a1caadc2b4e9d7a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/52e206a5338943bdb6b1b3d5e78b75fe, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/5935fb252ea64a0eaacf7cd971fd1741] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=99.9 K 2024-11-20T13:24:13,678 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:13,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,679 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/295d6df7ee79475e9a1caadc2b4e9d7a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/52e206a5338943bdb6b1b3d5e78b75fe, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/5935fb252ea64a0eaacf7cd971fd1741] 2024-11-20T13:24:13,679 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:13,679 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/B is initiating minor compaction (all files) 2024-11-20T13:24:13,679 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/B in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:13,679 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/9e6294bf83c949c7baf563b1fe3b9c28, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/a527268b231b4d0b8e47615259ce9adf, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/0979c0beb64b4b8ab642113dc82a457f] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=35.9 K 2024-11-20T13:24:13,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,679 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 295d6df7ee79475e9a1caadc2b4e9d7a, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732109049013 2024-11-20T13:24:13,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,680 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e6294bf83c949c7baf563b1fe3b9c28, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732109049013 2024-11-20T13:24:13,680 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52e206a5338943bdb6b1b3d5e78b75fe, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732109050191 2024-11-20T13:24:13,680 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting a527268b231b4d0b8e47615259ce9adf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732109050191 2024-11-20T13:24:13,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,681 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5935fb252ea64a0eaacf7cd971fd1741, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732109051440 2024-11-20T13:24:13,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,681 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 0979c0beb64b4b8ab642113dc82a457f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732109051440 2024-11-20T13:24:13,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,690 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,697 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,701 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:13,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,718 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#B#compaction#163 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:13,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,719 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/dd0f0b26fcf140e88781045cbf1b0377 is 50, key is test_row_0/B:col10/1732109052596/Put/seqid=0 2024-11-20T13:24:13,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,724 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,727 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120eb778d9fe8f9480ebf3bff4035589777_bb1c53ade43f12e473cc15132f34b609 store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:13,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,729 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true 
file=d41d8cd98f00b204e9800998ecf8427e20241120eb778d9fe8f9480ebf3bff4035589777_bb1c53ade43f12e473cc15132f34b609, store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:13,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,729 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120eb778d9fe8f9480ebf3bff4035589777_bb1c53ade43f12e473cc15132f34b609 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:13,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,734 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:13,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T13:24:13,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:13,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,735 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing bb1c53ade43f12e473cc15132f34b609 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-11-20T13:24:13,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=A 2024-11-20T13:24:13,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:13,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=B 2024-11-20T13:24:13,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:13,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=C 2024-11-20T13:24:13,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:13,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:24:13,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:24:13,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T13:24:13,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:13,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:13,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742017_1193 (size=12595) 2024-11-20T13:24:13,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,782 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,791 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,793 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/dd0f0b26fcf140e88781045cbf1b0377 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/dd0f0b26fcf140e88781045cbf1b0377 2024-11-20T13:24:13,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,797 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,802 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/B of bb1c53ade43f12e473cc15132f34b609 into dd0f0b26fcf140e88781045cbf1b0377(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:13,802 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:13,802 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/B, priority=13, startTime=1732109053677; duration=0sec 2024-11-20T13:24:13,802 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:13,802 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:B 2024-11-20T13:24:13,802 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:13,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,818 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:13,819 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/C is initiating minor compaction (all files) 2024-11-20T13:24:13,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:24:13,820 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/C in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:13,820 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/babe360a2e7242c28c06f7f684aff579, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8a3a3440395a42dda8902aa4f97aa3a2, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8494a7f7d6f9437d856cef1960ae4d7d] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=35.9 K 2024-11-20T13:24:13,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742018_1194 (size=4469) 2024-11-20T13:24:13,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120919223cbd05f41a8b2294daa62a108d0_bb1c53ade43f12e473cc15132f34b609 is 50, key is test_row_0/A:col10/1732109052605/Put/seqid=0 2024-11-20T13:24:13,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,823 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting babe360a2e7242c28c06f7f684aff579, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732109049013 2024-11-20T13:24:13,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,825 INFO 
[RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#A#compaction#162 average throughput is 0.20 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:13,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,826 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/cba90771b4464fb4a09433c1aa169a7c is 175, key is test_row_0/A:col10/1732109052596/Put/seqid=0 2024-11-20T13:24:13,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,827 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a3a3440395a42dda8902aa4f97aa3a2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732109050191 2024-11-20T13:24:13,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,828 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 8494a7f7d6f9437d856cef1960ae4d7d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732109051440 2024-11-20T13:24:13,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,832 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,843 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,853 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,870 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:13,878 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#C#compaction#165 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:13,879 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/b37f6856524e40fb9293d20d3c6b0451 is 50, key is test_row_0/C:col10/1732109052596/Put/seqid=0 2024-11-20T13:24:13,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742020_1196 (size=31549) 2024-11-20T13:24:13,927 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/cba90771b4464fb4a09433c1aa169a7c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/cba90771b4464fb4a09433c1aa169a7c 2024-11-20T13:24:13,935 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/A of bb1c53ade43f12e473cc15132f34b609 into cba90771b4464fb4a09433c1aa169a7c(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:13,935 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:13,935 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/A, priority=13, startTime=1732109053676; duration=0sec 2024-11-20T13:24:13,936 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:13,936 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:A 2024-11-20T13:24:13,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742019_1195 (size=9814) 2024-11-20T13:24:13,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:13,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109113951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:13,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742021_1197 (size=12595) 2024-11-20T13:24:13,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:13,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109113956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:13,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:13,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109113964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:14,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:14,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109114075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:14,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:14,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109114076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:14,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:14,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109114076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:14,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:14,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109114285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:14,290 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:14,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109114287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:14,290 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:14,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109114287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:14,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:14,348 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120919223cbd05f41a8b2294daa62a108d0_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120919223cbd05f41a8b2294daa62a108d0_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:14,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/a8fe3b64db7e412f9fb4bd1fc5f1a8b1, store: [table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:14,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/a8fe3b64db7e412f9fb4bd1fc5f1a8b1 is 175, key is test_row_0/A:col10/1732109052605/Put/seqid=0 2024-11-20T13:24:14,373 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/b37f6856524e40fb9293d20d3c6b0451 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/b37f6856524e40fb9293d20d3c6b0451 2024-11-20T13:24:14,380 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/C of bb1c53ade43f12e473cc15132f34b609 into b37f6856524e40fb9293d20d3c6b0451(size=12.3 K), total size for store is 
12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:24:14,380 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:14,380 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/C, priority=13, startTime=1732109053678; duration=0sec 2024-11-20T13:24:14,380 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:14,380 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:C 2024-11-20T13:24:14,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742022_1198 (size=22461) 2024-11-20T13:24:14,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:14,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109114588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:14,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:14,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109114593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:14,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:14,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109114594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:14,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T13:24:14,814 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=216, memsize=4.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/a8fe3b64db7e412f9fb4bd1fc5f1a8b1 2024-11-20T13:24:14,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/66f870266198406f8e25c30238972a31 is 50, key is test_row_0/B:col10/1732109052605/Put/seqid=0 2024-11-20T13:24:14,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742023_1199 (size=9757) 2024-11-20T13:24:14,882 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/66f870266198406f8e25c30238972a31 2024-11-20T13:24:14,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/76c1e28b24d245aebc5a95804926037f is 50, key is test_row_0/C:col10/1732109052605/Put/seqid=0 2024-11-20T13:24:14,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742024_1200 (size=9757) 2024-11-20T13:24:14,961 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=216 (bloomFilter=true), 
to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/76c1e28b24d245aebc5a95804926037f 2024-11-20T13:24:14,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/a8fe3b64db7e412f9fb4bd1fc5f1a8b1 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/a8fe3b64db7e412f9fb4bd1fc5f1a8b1 2024-11-20T13:24:14,986 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/a8fe3b64db7e412f9fb4bd1fc5f1a8b1, entries=100, sequenceid=216, filesize=21.9 K 2024-11-20T13:24:14,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/66f870266198406f8e25c30238972a31 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/66f870266198406f8e25c30238972a31 2024-11-20T13:24:14,993 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/66f870266198406f8e25c30238972a31, entries=100, sequenceid=216, filesize=9.5 K 2024-11-20T13:24:14,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/76c1e28b24d245aebc5a95804926037f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/76c1e28b24d245aebc5a95804926037f 2024-11-20T13:24:15,001 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/76c1e28b24d245aebc5a95804926037f, entries=100, sequenceid=216, filesize=9.5 K 2024-11-20T13:24:15,002 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=187.85 KB/192360 for bb1c53ade43f12e473cc15132f34b609 in 1267ms, sequenceid=216, compaction requested=false 2024-11-20T13:24:15,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for bb1c53ade43f12e473cc15132f34b609: 
2024-11-20T13:24:15,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:15,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-11-20T13:24:15,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-11-20T13:24:15,005 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-20T13:24:15,006 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3490 sec 2024-11-20T13:24:15,007 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 2.3530 sec 2024-11-20T13:24:15,099 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb1c53ade43f12e473cc15132f34b609 3/3 column families, dataSize=194.56 KB heapSize=510.52 KB 2024-11-20T13:24:15,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:15,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=A 2024-11-20T13:24:15,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:15,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=B 2024-11-20T13:24:15,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:15,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=C 2024-11-20T13:24:15,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:15,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:15,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109115100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:15,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:15,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109115102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:15,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:15,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109115103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:15,131 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201b444fef5e33452db8d485d14f260937_bb1c53ade43f12e473cc15132f34b609 is 50, key is test_row_0/A:col10/1732109055096/Put/seqid=0 2024-11-20T13:24:15,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:15,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109115206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:15,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:15,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109115206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:15,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742025_1201 (size=17284) 2024-11-20T13:24:15,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:15,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:15,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109115411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:15,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109115410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:15,610 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:15,617 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201b444fef5e33452db8d485d14f260937_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201b444fef5e33452db8d485d14f260937_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:15,618 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/1e973448898a44308bd73f85dd265668, store: [table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:15,619 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/1e973448898a44308bd73f85dd265668 is 175, key is test_row_0/A:col10/1732109055096/Put/seqid=0 2024-11-20T13:24:15,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742026_1202 (size=48389) 2024-11-20T13:24:15,633 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=252, memsize=67.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/1e973448898a44308bd73f85dd265668 2024-11-20T13:24:15,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/c0f368cdab1c4291bc1da12ce8c2fe72 is 50, key is test_row_0/B:col10/1732109055096/Put/seqid=0 2024-11-20T13:24:15,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742027_1203 (size=12151) 2024-11-20T13:24:15,697 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.09 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/c0f368cdab1c4291bc1da12ce8c2fe72 2024-11-20T13:24:15,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:15,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109115716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:15,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/736793bcbbfd4472818ce33100117af8 is 50, key is test_row_0/C:col10/1732109055096/Put/seqid=0 2024-11-20T13:24:15,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:15,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109115718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:15,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742028_1204 (size=12151) 2024-11-20T13:24:16,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:16,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109116115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:16,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:16,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37068 deadline: 1732109116125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:16,127 DEBUG [Thread-690 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8303 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:24:16,164 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.09 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/736793bcbbfd4472818ce33100117af8 2024-11-20T13:24:16,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/1e973448898a44308bd73f85dd265668 as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/1e973448898a44308bd73f85dd265668 2024-11-20T13:24:16,177 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/1e973448898a44308bd73f85dd265668, entries=250, sequenceid=252, filesize=47.3 K 2024-11-20T13:24:16,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/c0f368cdab1c4291bc1da12ce8c2fe72 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/c0f368cdab1c4291bc1da12ce8c2fe72 2024-11-20T13:24:16,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,187 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/c0f368cdab1c4291bc1da12ce8c2fe72, entries=150, sequenceid=252, filesize=11.9 K 2024-11-20T13:24:16,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,193 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/736793bcbbfd4472818ce33100117af8 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/736793bcbbfd4472818ce33100117af8 2024-11-20T13:24:16,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,209 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/736793bcbbfd4472818ce33100117af8, entries=150, sequenceid=252, filesize=11.9 K 2024-11-20T13:24:16,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,210 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~201.27 KB/206100, heapSize ~528.05 KB/540720, currentSize=0 B/0 for bb1c53ade43f12e473cc15132f34b609 in 1112ms, sequenceid=252, compaction requested=true 2024-11-20T13:24:16,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:16,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,211 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:16,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,213 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102399 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:16,213 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/A is initiating minor compaction (all files) 2024-11-20T13:24:16,213 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/A in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:16,213 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/cba90771b4464fb4a09433c1aa169a7c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/a8fe3b64db7e412f9fb4bd1fc5f1a8b1, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/1e973448898a44308bd73f85dd265668] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=100.0 K 2024-11-20T13:24:16,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,213 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:16,213 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/cba90771b4464fb4a09433c1aa169a7c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/a8fe3b64db7e412f9fb4bd1fc5f1a8b1, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/1e973448898a44308bd73f85dd265668] 2024-11-20T13:24:16,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,214 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting cba90771b4464fb4a09433c1aa169a7c, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732109051440 2024-11-20T13:24:16,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,214 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting a8fe3b64db7e412f9fb4bd1fc5f1a8b1, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732109052605 2024-11-20T13:24:16,215 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e973448898a44308bd73f85dd265668, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, 
seqNum=252, earliestPutTs=1732109053933 2024-11-20T13:24:16,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb1c53ade43f12e473cc15132f34b609:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:24:16,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:16,217 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:16,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb1c53ade43f12e473cc15132f34b609:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:24:16,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:16,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb1c53ade43f12e473cc15132f34b609:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:24:16,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 
2024-11-20T13:24:16,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,219 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:16,220 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/B is initiating minor compaction (all files) 2024-11-20T13:24:16,220 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/B in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:16,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,220 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/dd0f0b26fcf140e88781045cbf1b0377, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/66f870266198406f8e25c30238972a31, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/c0f368cdab1c4291bc1da12ce8c2fe72] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=33.7 K 2024-11-20T13:24:16,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,220 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting dd0f0b26fcf140e88781045cbf1b0377, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732109051440 2024-11-20T13:24:16,221 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 66f870266198406f8e25c30238972a31, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732109052605 2024-11-20T13:24:16,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,221 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting c0f368cdab1c4291bc1da12ce8c2fe72, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=252, earliestPutTs=1732109053954 2024-11-20T13:24:16,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,246 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:16,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,262 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#B#compaction#172 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:16,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,263 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/9202ee7524f44ee1b23fd55aed8d98f7 is 50, key is test_row_0/B:col10/1732109055096/Put/seqid=0 2024-11-20T13:24:16,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,264 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer 
created=d41d8cd98f00b204e9800998ecf8427e202411205a0a7094a99f459ba25c0f4b46bfe6f2_bb1c53ade43f12e473cc15132f34b609 store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:16,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,266 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411205a0a7094a99f459ba25c0f4b46bfe6f2_bb1c53ade43f12e473cc15132f34b609, store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:16,266 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205a0a7094a99f459ba25c0f4b46bfe6f2_bb1c53ade43f12e473cc15132f34b609 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:16,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,268 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,274 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,283 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,289 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742029_1205 (size=12697) 2024-11-20T13:24:16,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,308 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/9202ee7524f44ee1b23fd55aed8d98f7 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/9202ee7524f44ee1b23fd55aed8d98f7 2024-11-20T13:24:16,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742030_1206 (size=4469) 2024-11-20T13:24:16,339 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/B of bb1c53ade43f12e473cc15132f34b609 into 9202ee7524f44ee1b23fd55aed8d98f7(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:24:16,339 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:16,339 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/B, priority=13, startTime=1732109056217; duration=0sec 2024-11-20T13:24:16,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,339 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:16,339 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:B 2024-11-20T13:24:16,339 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:16,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,345 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#A#compaction#171 average throughput is 0.25 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:16,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,346 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/4f9185609ede41b2b8e6e3e17950315b is 175, key is test_row_0/A:col10/1732109055096/Put/seqid=0 2024-11-20T13:24:16,347 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb1c53ade43f12e473cc15132f34b609 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T13:24:16,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:16,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=A 2024-11-20T13:24:16,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:16,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=B 2024-11-20T13:24:16,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:16,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=C 2024-11-20T13:24:16,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:16,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:24:16,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,350 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:16,350 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/C is initiating minor compaction (all files) 2024-11-20T13:24:16,350 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/C in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:16,350 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/b37f6856524e40fb9293d20d3c6b0451, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/76c1e28b24d245aebc5a95804926037f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/736793bcbbfd4472818ce33100117af8] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=33.7 K 2024-11-20T13:24:16,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,352 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting b37f6856524e40fb9293d20d3c6b0451, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732109051440 2024-11-20T13:24:16,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,353 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 76c1e28b24d245aebc5a95804926037f, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732109052605 2024-11-20T13:24:16,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,354 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 736793bcbbfd4472818ce33100117af8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732109053954 2024-11-20T13:24:16,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742031_1207 (size=31651) 2024-11-20T13:24:16,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,403 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#C#compaction#173 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:16,404 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/7060f4fc890e4433ad8fd853219d60ea is 50, key is test_row_0/C:col10/1732109055096/Put/seqid=0 2024-11-20T13:24:16,409 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/4f9185609ede41b2b8e6e3e17950315b as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/4f9185609ede41b2b8e6e3e17950315b 2024-11-20T13:24:16,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,417 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/A of bb1c53ade43f12e473cc15132f34b609 into 4f9185609ede41b2b8e6e3e17950315b(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:16,417 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:16,417 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/A, priority=13, startTime=1732109056211; duration=0sec 2024-11-20T13:24:16,417 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:16,417 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:A 2024-11-20T13:24:16,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,427 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f9a79a30d3bb48c7886723bed7688531_bb1c53ade43f12e473cc15132f34b609 is 50, key is test_row_0/A:col10/1732109056346/Put/seqid=0 2024-11-20T13:24:16,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:24:16,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742032_1208 (size=12697) 2024-11-20T13:24:16,479 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/7060f4fc890e4433ad8fd853219d60ea as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/7060f4fc890e4433ad8fd853219d60ea 2024-11-20T13:24:16,487 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/C of bb1c53ade43f12e473cc15132f34b609 into 7060f4fc890e4433ad8fd853219d60ea(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:24:16,487 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:16,487 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/C, priority=13, startTime=1732109056218; duration=0sec 2024-11-20T13:24:16,487 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:16,487 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:C 2024-11-20T13:24:16,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742034_1210 (size=27648) 2024-11-20T13:24:16,498 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:16,503 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f9a79a30d3bb48c7886723bed7688531_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f9a79a30d3bb48c7886723bed7688531_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:16,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due 
to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:16,504 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/a1447da1960546fba1b6ac4f7e45d0d4, store: [table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:16,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109116499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:16,505 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/a1447da1960546fba1b6ac4f7e45d0d4 is 175, key is test_row_0/A:col10/1732109056346/Put/seqid=0 2024-11-20T13:24:16,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:16,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109116505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:16,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742033_1209 (size=83435) 2024-11-20T13:24:16,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:16,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109116606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:16,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:16,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109116610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:16,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T13:24:16,766 INFO [Thread-692 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-20T13:24:16,767 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:24:16,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-11-20T13:24:16,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T13:24:16,769 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:24:16,769 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:24:16,770 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:24:16,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:16,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:16,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109116822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:16,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109116820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:16,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T13:24:16,910 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=265, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/a1447da1960546fba1b6ac4f7e45d0d4 2024-11-20T13:24:16,921 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:16,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T13:24:16,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:16,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:16,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:16,923 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:16,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:16,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:16,937 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/55bfdb26c49b400b9ae888be2a7d81ce is 50, key is test_row_0/B:col10/1732109056346/Put/seqid=0 2024-11-20T13:24:16,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742035_1211 (size=12301) 2024-11-20T13:24:16,977 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/55bfdb26c49b400b9ae888be2a7d81ce 2024-11-20T13:24:16,995 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/81cf0dc0e85f4cc490a554bc2927b88d is 50, key is test_row_0/C:col10/1732109056346/Put/seqid=0 2024-11-20T13:24:17,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742036_1212 (size=12301) 2024-11-20T13:24:17,057 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/81cf0dc0e85f4cc490a554bc2927b88d 2024-11-20T13:24:17,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/a1447da1960546fba1b6ac4f7e45d0d4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/a1447da1960546fba1b6ac4f7e45d0d4 2024-11-20T13:24:17,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T13:24:17,072 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/a1447da1960546fba1b6ac4f7e45d0d4, entries=450, sequenceid=265, filesize=81.5 K 2024-11-20T13:24:17,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/55bfdb26c49b400b9ae888be2a7d81ce as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/55bfdb26c49b400b9ae888be2a7d81ce 2024-11-20T13:24:17,076 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:17,076 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T13:24:17,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:17,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:17,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:17,077 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:17,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:17,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:17,078 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/55bfdb26c49b400b9ae888be2a7d81ce, entries=150, sequenceid=265, filesize=12.0 K 2024-11-20T13:24:17,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/81cf0dc0e85f4cc490a554bc2927b88d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/81cf0dc0e85f4cc490a554bc2927b88d 2024-11-20T13:24:17,085 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/81cf0dc0e85f4cc490a554bc2927b88d, entries=150, sequenceid=265, filesize=12.0 K 2024-11-20T13:24:17,086 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for bb1c53ade43f12e473cc15132f34b609 in 739ms, sequenceid=265, compaction requested=false 2024-11-20T13:24:17,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:17,130 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb1c53ade43f12e473cc15132f34b609 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T13:24:17,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=A 2024-11-20T13:24:17,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:17,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=B 2024-11-20T13:24:17,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:17,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=C 2024-11-20T13:24:17,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:17,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:17,169 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:17,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109117160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:17,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120602fca6d2b58497dac32b35a72538266_bb1c53ade43f12e473cc15132f34b609 is 50, key is test_row_0/A:col10/1732109056482/Put/seqid=0 2024-11-20T13:24:17,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:17,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109117166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:17,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742037_1213 (size=14994) 2024-11-20T13:24:17,229 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:17,230 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T13:24:17,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:17,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:17,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:17,230 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:17,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:17,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:17,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:17,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109117271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:17,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:17,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109117281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:17,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T13:24:17,383 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:17,384 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T13:24:17,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:17,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:17,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:17,384 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:17,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:17,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:17,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:17,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109117483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:17,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:17,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109117487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:17,536 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:17,537 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T13:24:17,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:17,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:17,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:17,537 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:17,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:17,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:24:17,605 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:24:17,611 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120602fca6d2b58497dac32b35a72538266_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120602fca6d2b58497dac32b35a72538266_bb1c53ade43f12e473cc15132f34b609
2024-11-20T13:24:17,612 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/b47a69e3361141cfb1bc9610ba6e28aa, store: [table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609]
2024-11-20T13:24:17,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/b47a69e3361141cfb1bc9610ba6e28aa is 175, key is test_row_0/A:col10/1732109056482/Put/seqid=0
2024-11-20T13:24:17,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742038_1214 (size=39949)
2024-11-20T13:24:17,690 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137
2024-11-20T13:24:17,690 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62
2024-11-20T13:24:17,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.
2024-11-20T13:24:17,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing
2024-11-20T13:24:17,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.
2024-11-20T13:24:17,691 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62
java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:17,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:17,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:17,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:17,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109117791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:17,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:17,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109117798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:17,844 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:17,844 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T13:24:17,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:17,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:17,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:17,845 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:17,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:17,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:17,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T13:24:17,997 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:17,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T13:24:17,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:17,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:17,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:17,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:17,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:17,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:18,060 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=293, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/b47a69e3361141cfb1bc9610ba6e28aa
2024-11-20T13:24:18,073 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/c3a8c69ff6e24da9b78c2abb2059b516 is 50, key is test_row_0/B:col10/1732109056482/Put/seqid=0
2024-11-20T13:24:18,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742039_1215 (size=12301)
2024-11-20T13:24:18,110 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/c3a8c69ff6e24da9b78c2abb2059b516
2024-11-20T13:24:18,122 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:18,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109118120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:18,123 DEBUG [Thread-682 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4167 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:24:18,135 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/2d98b9e6b9d5480ab1fefbbd8feb2898 is 50, key is test_row_0/C:col10/1732109056482/Put/seqid=0 2024-11-20T13:24:18,151 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:18,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote 
procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T13:24:18,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:18,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:18,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:18,153 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:18,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:18,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:18,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742040_1216 (size=12301) 2024-11-20T13:24:18,303 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:18,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109118299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:18,303 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:18,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109118300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:18,305 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:18,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T13:24:18,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:18,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:18,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:18,306 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
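The RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources(), which rejects writes once a region's memstore passes its blocking threshold: the configured memstore flush size multiplied by the block multiplier. A minimal sketch of that arithmetic follows, using the two standard settings that control it; the 128 KB flush size is an assumed test-only value chosen so the product matches the 512 K limit seen in this log, not a value read from the run's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hedged sketch: how the 512.0 K blocking limit in the RegionTooBusyException
// messages can arise from the two standard settings that control it. The 128 KB
// flush size is an assumption for illustration, not taken from this test run.
public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // hbase.hregion.memstore.flush.size: memstore size at which a flush is
    // requested (default 128 MB; assumed here to be 128 KB for the test).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);

    // hbase.hregion.memstore.block.multiplier: writes are rejected with
    // RegionTooBusyException once the memstore exceeds flush size * multiplier
    // (default 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 0L);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier; // 128 K * 4 = 512 K

    System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
  }
}

Once the in-progress flush drains the memstore back under this limit, the rejected Mutate calls go through again, which is the pattern the surrounding entries show.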
2024-11-20T13:24:18,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:18,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:18,458 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:18,459 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T13:24:18,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:18,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:18,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:18,459 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:18,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:18,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:18,589 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/2d98b9e6b9d5480ab1fefbbd8feb2898 2024-11-20T13:24:18,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/b47a69e3361141cfb1bc9610ba6e28aa as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/b47a69e3361141cfb1bc9610ba6e28aa 2024-11-20T13:24:18,601 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/b47a69e3361141cfb1bc9610ba6e28aa, entries=200, sequenceid=293, filesize=39.0 K 2024-11-20T13:24:18,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/c3a8c69ff6e24da9b78c2abb2059b516 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/c3a8c69ff6e24da9b78c2abb2059b516 2024-11-20T13:24:18,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/c3a8c69ff6e24da9b78c2abb2059b516, entries=150, sequenceid=293, filesize=12.0 K 2024-11-20T13:24:18,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/2d98b9e6b9d5480ab1fefbbd8feb2898 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/2d98b9e6b9d5480ab1fefbbd8feb2898 2024-11-20T13:24:18,612 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:18,615 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/2d98b9e6b9d5480ab1fefbbd8feb2898, entries=150, sequenceid=293, filesize=12.0 K 2024-11-20T13:24:18,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T13:24:18,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:18,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:18,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:18,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:18,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
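While flush procedure pid=62 keeps being re-dispatched and failing with "Unable to complete flush" (the region is already flushing), concurrent writers are intermittently rejected with RegionTooBusyException, as in the Mutate entries earlier and later in this log. The HBase client retries such rejections internally; the sketch below only illustrates an application-level backoff loop around Table.put for the case where those built-in retries are exhausted. The table, family, qualifier, and row used here are illustrative assumptions, not values taken from the test.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch of an application-level retry loop around Table.put; the HBase
// client already retries RegionTooBusyException on its own, so this only shows
// the backoff pattern once those retries are exhausted.
public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

      // Illustrative row and column; the test's real schema may differ.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (IOException e) {
          // Covers RegionTooBusyException surfaced after the client's own retries
          // run out, e.g. "Over memstore limit=512.0 K" while the region flushes.
          if (attempt >= 5) {
            throw e;
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2; // simple exponential backoff
        }
      }
    }
  }
}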
2024-11-20T13:24:18,629 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for bb1c53ade43f12e473cc15132f34b609 in 1499ms, sequenceid=293, compaction requested=true 2024-11-20T13:24:18,629 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:18,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb1c53ade43f12e473cc15132f34b609:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:24:18,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:18,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb1c53ade43f12e473cc15132f34b609:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:24:18,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T13:24:18,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb1c53ade43f12e473cc15132f34b609:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:24:18,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T13:24:18,630 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:18,630 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:18,632 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 155035 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:18,632 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/A is initiating minor compaction (all files) 2024-11-20T13:24:18,632 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/A in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:18,632 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/4f9185609ede41b2b8e6e3e17950315b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/a1447da1960546fba1b6ac4f7e45d0d4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/b47a69e3361141cfb1bc9610ba6e28aa] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=151.4 K 2024-11-20T13:24:18,632 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:18,632 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/4f9185609ede41b2b8e6e3e17950315b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/a1447da1960546fba1b6ac4f7e45d0d4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/b47a69e3361141cfb1bc9610ba6e28aa] 2024-11-20T13:24:18,632 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:18,632 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/C is initiating minor compaction (all files) 2024-11-20T13:24:18,632 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/C in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
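The compaction selections above (three A-store files totalling 155035 bytes, three C-store files totalling 37299 bytes) are made by ExploringCompactionPolicy. A simplified, hedged rendering of its "in ratio" test follows: each selected file should be no larger than the compaction ratio (hbase.hstore.compaction.ratio, default 1.2) times the combined size of the other files in the selection. The byte sizes below are approximations reconstructed from the 30.9 K / 81.5 K / 39.0 K figures in the log, and the check is an illustration, not the actual HBase implementation.

import java.util.Arrays;
import java.util.List;

// Hedged sketch of the "in ratio" test used when picking compaction candidates:
// every file must be at most ratio times the total size of the other selected
// files. Sizes are approximate reconstructions of the A-store files in this log.
public class FilesInRatioSketch {
  static boolean filesInRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > (total - size) * ratio) {
        return false; // one file dominates the selection
      }
    }
    return true;
  }

  public static void main(String[] args) {
    List<Long> storeA = Arrays.asList(31_641L, 83_456L, 39_936L); // ~30.9 K, ~81.5 K, ~39.0 K
    System.out.println("A selection in ratio: " + filesInRatio(storeA, 1.2)); // true
  }
}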
2024-11-20T13:24:18,633 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/7060f4fc890e4433ad8fd853219d60ea, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/81cf0dc0e85f4cc490a554bc2927b88d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/2d98b9e6b9d5480ab1fefbbd8feb2898] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=36.4 K 2024-11-20T13:24:18,633 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 7060f4fc890e4433ad8fd853219d60ea, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732109053954 2024-11-20T13:24:18,633 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f9185609ede41b2b8e6e3e17950315b, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732109053954 2024-11-20T13:24:18,635 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 81cf0dc0e85f4cc490a554bc2927b88d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1732109056293 2024-11-20T13:24:18,635 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1447da1960546fba1b6ac4f7e45d0d4, keycount=450, bloomtype=ROW, size=81.5 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1732109056243 2024-11-20T13:24:18,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:18,636 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d98b9e6b9d5480ab1fefbbd8feb2898, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732109056482 2024-11-20T13:24:18,636 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting b47a69e3361141cfb1bc9610ba6e28aa, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732109056480 2024-11-20T13:24:18,660 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:18,666 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#C#compaction#181 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:18,666 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/21de38980cd14e11803bbac37a511004 is 50, key is test_row_0/C:col10/1732109056482/Put/seqid=0 2024-11-20T13:24:18,681 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120acb15ff68cfc4656b1368a2ad31300bd_bb1c53ade43f12e473cc15132f34b609 store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:18,683 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120acb15ff68cfc4656b1368a2ad31300bd_bb1c53ade43f12e473cc15132f34b609, store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:18,683 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120acb15ff68cfc4656b1368a2ad31300bd_bb1c53ade43f12e473cc15132f34b609 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:18,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742041_1217 (size=12949) 2024-11-20T13:24:18,749 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/21de38980cd14e11803bbac37a511004 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/21de38980cd14e11803bbac37a511004 2024-11-20T13:24:18,761 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/C of bb1c53ade43f12e473cc15132f34b609 into 21de38980cd14e11803bbac37a511004(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:18,761 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:18,761 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/C, priority=13, startTime=1732109058629; duration=0sec 2024-11-20T13:24:18,761 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:18,761 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:C 2024-11-20T13:24:18,761 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:18,765 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:18,766 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/B is initiating minor compaction (all files) 2024-11-20T13:24:18,766 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/B in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:18,766 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/9202ee7524f44ee1b23fd55aed8d98f7, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/55bfdb26c49b400b9ae888be2a7d81ce, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/c3a8c69ff6e24da9b78c2abb2059b516] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=36.4 K 2024-11-20T13:24:18,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742042_1218 (size=4469) 2024-11-20T13:24:18,767 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 9202ee7524f44ee1b23fd55aed8d98f7, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732109053954 2024-11-20T13:24:18,767 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 55bfdb26c49b400b9ae888be2a7d81ce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1732109056293 2024-11-20T13:24:18,768 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting c3a8c69ff6e24da9b78c2abb2059b516, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=293, earliestPutTs=1732109056482 2024-11-20T13:24:18,777 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#A#compaction#180 average throughput is 0.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:18,777 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/abdf41c406f3424bb43ebcc8fb28c0fd is 175, key is test_row_0/A:col10/1732109056482/Put/seqid=0 2024-11-20T13:24:18,786 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:18,787 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T13:24:18,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:18,788 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing bb1c53ade43f12e473cc15132f34b609 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T13:24:18,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=A 2024-11-20T13:24:18,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:18,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=B 2024-11-20T13:24:18,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:18,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=C 2024-11-20T13:24:18,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:18,797 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#B#compaction#182 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:18,798 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/65b1e9c58d9742628b89f93c7458588b is 50, key is test_row_0/B:col10/1732109056482/Put/seqid=0 2024-11-20T13:24:18,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742043_1219 (size=31903) 2024-11-20T13:24:18,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207d5e9acc5fca43f99aa0506a4762ef21_bb1c53ade43f12e473cc15132f34b609 is 50, key is test_row_0/A:col10/1732109057164/Put/seqid=0 2024-11-20T13:24:18,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T13:24:18,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742044_1220 (size=12949) 2024-11-20T13:24:18,904 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/65b1e9c58d9742628b89f93c7458588b as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/65b1e9c58d9742628b89f93c7458588b 2024-11-20T13:24:18,915 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/B of bb1c53ade43f12e473cc15132f34b609 into 65b1e9c58d9742628b89f93c7458588b(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:18,915 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:18,915 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/B, priority=13, startTime=1732109058629; duration=0sec 2024-11-20T13:24:18,915 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:18,915 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:B 2024-11-20T13:24:18,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742045_1221 (size=12454) 2024-11-20T13:24:19,282 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/abdf41c406f3424bb43ebcc8fb28c0fd as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/abdf41c406f3424bb43ebcc8fb28c0fd 2024-11-20T13:24:19,299 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/A of bb1c53ade43f12e473cc15132f34b609 into abdf41c406f3424bb43ebcc8fb28c0fd(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:24:19,299 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:19,299 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/A, priority=13, startTime=1732109058629; duration=0sec 2024-11-20T13:24:19,299 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:19,299 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:A 2024-11-20T13:24:19,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
as already flushing 2024-11-20T13:24:19,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:19,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:19,328 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207d5e9acc5fca43f99aa0506a4762ef21_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207d5e9acc5fca43f99aa0506a4762ef21_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:19,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/f88850a2a0d8445b9eec3c1e9e0f18a5, store: [table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:19,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/f88850a2a0d8445b9eec3c1e9e0f18a5 is 175, key is test_row_0/A:col10/1732109057164/Put/seqid=0 2024-11-20T13:24:19,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742046_1222 (size=31255) 2024-11-20T13:24:19,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:19,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109119369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:19,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:19,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109119374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:19,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:19,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109119475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:19,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:19,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109119479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:19,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:19,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109119681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:19,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:19,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109119682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:19,772 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=304, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/f88850a2a0d8445b9eec3c1e9e0f18a5 2024-11-20T13:24:19,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/467955aa4e4b4283994f5a2ff19037eb is 50, key is test_row_0/B:col10/1732109057164/Put/seqid=0 2024-11-20T13:24:19,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742047_1223 (size=12301) 2024-11-20T13:24:19,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:19,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109119983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:19,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:19,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109119984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:20,244 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/467955aa4e4b4283994f5a2ff19037eb 2024-11-20T13:24:20,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/051fe4b1be4949bd94aa649945edced9 is 50, key is test_row_0/C:col10/1732109057164/Put/seqid=0 2024-11-20T13:24:20,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742048_1224 (size=12301) 2024-11-20T13:24:20,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:20,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109120496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:20,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:20,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109120496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:20,736 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/051fe4b1be4949bd94aa649945edced9 2024-11-20T13:24:20,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/f88850a2a0d8445b9eec3c1e9e0f18a5 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/f88850a2a0d8445b9eec3c1e9e0f18a5 2024-11-20T13:24:20,751 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/f88850a2a0d8445b9eec3c1e9e0f18a5, entries=150, sequenceid=304, filesize=30.5 K 2024-11-20T13:24:20,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/467955aa4e4b4283994f5a2ff19037eb as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/467955aa4e4b4283994f5a2ff19037eb 2024-11-20T13:24:20,769 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/467955aa4e4b4283994f5a2ff19037eb, entries=150, sequenceid=304, filesize=12.0 K 2024-11-20T13:24:20,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/051fe4b1be4949bd94aa649945edced9 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/051fe4b1be4949bd94aa649945edced9 2024-11-20T13:24:20,780 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/051fe4b1be4949bd94aa649945edced9, entries=150, sequenceid=304, filesize=12.0 K 2024-11-20T13:24:20,781 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for bb1c53ade43f12e473cc15132f34b609 in 1993ms, sequenceid=304, compaction requested=false 2024-11-20T13:24:20,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:20,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:20,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-11-20T13:24:20,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-11-20T13:24:20,790 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-20T13:24:20,790 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.0180 sec 2024-11-20T13:24:20,793 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 4.0240 sec 2024-11-20T13:24:20,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T13:24:20,886 INFO [Thread-692 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-20T13:24:20,887 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:24:20,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees 2024-11-20T13:24:20,890 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:24:20,890 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T13:24:20,891 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:24:20,891 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:24:20,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T13:24:21,044 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:21,045 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-20T13:24:21,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:21,045 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2837): Flushing bb1c53ade43f12e473cc15132f34b609 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T13:24:21,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=A 2024-11-20T13:24:21,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:21,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=B 2024-11-20T13:24:21,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:21,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=C 2024-11-20T13:24:21,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:21,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e9badc8aa8364fd69d345f16697defc9_bb1c53ade43f12e473cc15132f34b609 is 50, key is test_row_0/A:col10/1732109059365/Put/seqid=0 2024-11-20T13:24:21,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742049_1225 
(size=12454) 2024-11-20T13:24:21,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T13:24:21,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T13:24:21,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:21,501 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. as already flushing 2024-11-20T13:24:21,519 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:21,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109121519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:21,522 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:21,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109121520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:21,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:21,546 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e9badc8aa8364fd69d345f16697defc9_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e9badc8aa8364fd69d345f16697defc9_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:21,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/dd33040f92964c218c2c6709521c3486, store: [table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:21,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/dd33040f92964c218c2c6709521c3486 is 175, key is test_row_0/A:col10/1732109059365/Put/seqid=0 2024-11-20T13:24:21,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742050_1226 (size=31255) 2024-11-20T13:24:21,602 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=332, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/dd33040f92964c218c2c6709521c3486 2024-11-20T13:24:21,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/573add6e68f14434b583c8bd12689706 is 50, key is test_row_0/B:col10/1732109059365/Put/seqid=0 2024-11-20T13:24:21,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:21,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109121622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:21,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:21,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109121624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:21,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742051_1227 (size=12301) 2024-11-20T13:24:21,829 DEBUG [Thread-693 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51c9d640 to 127.0.0.1:53074 2024-11-20T13:24:21,829 DEBUG [Thread-693 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:24:21,841 DEBUG [Thread-695 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12d65163 to 127.0.0.1:53074 2024-11-20T13:24:21,841 DEBUG [Thread-695 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:24:21,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:21,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109121831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:21,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:21,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109121837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:21,851 DEBUG [Thread-699 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7bc461df to 127.0.0.1:53074 2024-11-20T13:24:21,851 DEBUG [Thread-699 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:24:21,853 DEBUG [Thread-697 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e20f1cc to 127.0.0.1:53074 2024-11-20T13:24:21,853 DEBUG [Thread-697 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:24:21,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T13:24:22,062 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/573add6e68f14434b583c8bd12689706 2024-11-20T13:24:22,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/18a565df8a3742519c1b4585d6bbef02 is 50, key is test_row_0/C:col10/1732109059365/Put/seqid=0 2024-11-20T13:24:22,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742052_1228 (size=12301) 2024-11-20T13:24:22,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:22,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37026 deadline: 1732109122149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:22,157 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:22,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37042 deadline: 1732109122156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:22,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:22,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37078 deadline: 1732109122157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:22,161 DEBUG [Thread-682 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8205 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:24:22,493 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=332 (bloomFilter=true), 
to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/18a565df8a3742519c1b4585d6bbef02 2024-11-20T13:24:22,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/dd33040f92964c218c2c6709521c3486 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/dd33040f92964c218c2c6709521c3486 2024-11-20T13:24:22,502 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/dd33040f92964c218c2c6709521c3486, entries=150, sequenceid=332, filesize=30.5 K 2024-11-20T13:24:22,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/573add6e68f14434b583c8bd12689706 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/573add6e68f14434b583c8bd12689706 2024-11-20T13:24:22,507 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/573add6e68f14434b583c8bd12689706, entries=150, sequenceid=332, filesize=12.0 K 2024-11-20T13:24:22,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/18a565df8a3742519c1b4585d6bbef02 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/18a565df8a3742519c1b4585d6bbef02 2024-11-20T13:24:22,516 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/18a565df8a3742519c1b4585d6bbef02, entries=150, sequenceid=332, filesize=12.0 K 2024-11-20T13:24:22,518 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for bb1c53ade43f12e473cc15132f34b609 in 1473ms, sequenceid=332, compaction requested=true 2024-11-20T13:24:22,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2538): Flush status journal for bb1c53ade43f12e473cc15132f34b609: 
2024-11-20T13:24:22,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:22,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-11-20T13:24:22,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=64 2024-11-20T13:24:22,522 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-20T13:24:22,522 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6280 sec 2024-11-20T13:24:22,524 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees in 1.6360 sec 2024-11-20T13:24:22,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:22,656 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb1c53ade43f12e473cc15132f34b609 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T13:24:22,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=A 2024-11-20T13:24:22,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:22,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=B 2024-11-20T13:24:22,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:22,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=C 2024-11-20T13:24:22,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:22,657 DEBUG [Thread-686 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6d90494b to 127.0.0.1:53074 2024-11-20T13:24:22,657 DEBUG [Thread-686 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:24:22,662 DEBUG [Thread-688 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a57be6e to 127.0.0.1:53074 2024-11-20T13:24:22,662 DEBUG [Thread-688 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:24:22,667 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207c82661964bd4f878d0787491a166374_bb1c53ade43f12e473cc15132f34b609 is 50, key is test_row_0/A:col10/1732109061509/Put/seqid=0 2024-11-20T13:24:22,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742053_1229 (size=12454) 2024-11-20T13:24:23,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=63 2024-11-20T13:24:23,008 INFO [Thread-692 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-20T13:24:23,092 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:23,118 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207c82661964bd4f878d0787491a166374_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207c82661964bd4f878d0787491a166374_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:23,127 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/9403141fe34341738000f1ec1748385e, store: [table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:23,128 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/9403141fe34341738000f1ec1748385e is 175, key is test_row_0/A:col10/1732109061509/Put/seqid=0 2024-11-20T13:24:23,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742054_1230 (size=31255) 2024-11-20T13:24:23,267 DEBUG [Thread-684 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61722a7c to 127.0.0.1:53074 2024-11-20T13:24:23,267 DEBUG [Thread-684 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:24:23,575 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=344, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/9403141fe34341738000f1ec1748385e 2024-11-20T13:24:23,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/a78f9dcb156845b4b1458a7e0314caef is 50, key is test_row_0/B:col10/1732109061509/Put/seqid=0 2024-11-20T13:24:23,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742055_1231 (size=12301) 2024-11-20T13:24:23,669 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-20T13:24:23,987 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=344 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/a78f9dcb156845b4b1458a7e0314caef 2024-11-20T13:24:23,994 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/e210cd2cc28c4a2d9fe537e5221eb589 is 50, key is test_row_0/C:col10/1732109061509/Put/seqid=0 2024-11-20T13:24:23,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742056_1232 (size=12301) 2024-11-20T13:24:24,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=344 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/e210cd2cc28c4a2d9fe537e5221eb589 2024-11-20T13:24:24,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/9403141fe34341738000f1ec1748385e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/9403141fe34341738000f1ec1748385e 2024-11-20T13:24:24,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/9403141fe34341738000f1ec1748385e, entries=150, sequenceid=344, filesize=30.5 K 2024-11-20T13:24:24,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/a78f9dcb156845b4b1458a7e0314caef as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/a78f9dcb156845b4b1458a7e0314caef 2024-11-20T13:24:24,411 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/a78f9dcb156845b4b1458a7e0314caef, entries=150, sequenceid=344, filesize=12.0 K 2024-11-20T13:24:24,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/e210cd2cc28c4a2d9fe537e5221eb589 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/e210cd2cc28c4a2d9fe537e5221eb589 2024-11-20T13:24:24,415 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/e210cd2cc28c4a2d9fe537e5221eb589, entries=150, sequenceid=344, filesize=12.0 K 2024-11-20T13:24:24,416 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=13.42 KB/13740 for bb1c53ade43f12e473cc15132f34b609 in 1760ms, sequenceid=344, compaction requested=true 2024-11-20T13:24:24,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:24,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb1c53ade43f12e473cc15132f34b609:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:24:24,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:24,416 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:24:24,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb1c53ade43f12e473cc15132f34b609:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:24:24,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:24,416 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:24:24,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb1c53ade43f12e473cc15132f34b609:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:24:24,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:24,417 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 125668 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:24:24,417 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:24:24,418 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/B is initiating minor compaction (all files) 2024-11-20T13:24:24,418 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/A is initiating minor compaction (all files) 2024-11-20T13:24:24,418 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/B in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:24,418 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/A in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:24,418 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/65b1e9c58d9742628b89f93c7458588b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/467955aa4e4b4283994f5a2ff19037eb, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/573add6e68f14434b583c8bd12689706, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/a78f9dcb156845b4b1458a7e0314caef] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=48.7 K 2024-11-20T13:24:24,418 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/abdf41c406f3424bb43ebcc8fb28c0fd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/f88850a2a0d8445b9eec3c1e9e0f18a5, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/dd33040f92964c218c2c6709521c3486, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/9403141fe34341738000f1ec1748385e] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=122.7 K 2024-11-20T13:24:24,418 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:24,418 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/abdf41c406f3424bb43ebcc8fb28c0fd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/f88850a2a0d8445b9eec3c1e9e0f18a5, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/dd33040f92964c218c2c6709521c3486, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/9403141fe34341738000f1ec1748385e] 2024-11-20T13:24:24,418 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 65b1e9c58d9742628b89f93c7458588b, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732109056482 2024-11-20T13:24:24,418 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting abdf41c406f3424bb43ebcc8fb28c0fd, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732109056482 2024-11-20T13:24:24,418 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 467955aa4e4b4283994f5a2ff19037eb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1732109057154 2024-11-20T13:24:24,418 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting f88850a2a0d8445b9eec3c1e9e0f18a5, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1732109057154 2024-11-20T13:24:24,419 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 573add6e68f14434b583c8bd12689706, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732109059365 2024-11-20T13:24:24,419 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd33040f92964c218c2c6709521c3486, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732109059365 2024-11-20T13:24:24,419 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting a78f9dcb156845b4b1458a7e0314caef, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=344, earliestPutTs=1732109061509 2024-11-20T13:24:24,419 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9403141fe34341738000f1ec1748385e, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=344, earliestPutTs=1732109061509 2024-11-20T13:24:24,427 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:24,429 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#B#compaction#192 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:24,430 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/0c4e97f2875c4348b4be0c76a5a35347 is 50, key is test_row_0/B:col10/1732109061509/Put/seqid=0 2024-11-20T13:24:24,430 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411202690daab95ee4337ab8d76e7902f2b8f_bb1c53ade43f12e473cc15132f34b609 store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:24,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742057_1233 (size=13085) 2024-11-20T13:24:24,443 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/0c4e97f2875c4348b4be0c76a5a35347 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/0c4e97f2875c4348b4be0c76a5a35347 2024-11-20T13:24:24,449 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/B of bb1c53ade43f12e473cc15132f34b609 into 0c4e97f2875c4348b4be0c76a5a35347(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:24,449 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:24,449 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/B, priority=12, startTime=1732109064416; duration=0sec 2024-11-20T13:24:24,449 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:24,449 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:B 2024-11-20T13:24:24,449 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:24:24,450 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411202690daab95ee4337ab8d76e7902f2b8f_bb1c53ade43f12e473cc15132f34b609, store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:24,450 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202690daab95ee4337ab8d76e7902f2b8f_bb1c53ade43f12e473cc15132f34b609 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:24,451 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:24:24,451 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): bb1c53ade43f12e473cc15132f34b609/C is initiating minor compaction (all files) 2024-11-20T13:24:24,451 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb1c53ade43f12e473cc15132f34b609/C in TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:24,451 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/21de38980cd14e11803bbac37a511004, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/051fe4b1be4949bd94aa649945edced9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/18a565df8a3742519c1b4585d6bbef02, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/e210cd2cc28c4a2d9fe537e5221eb589] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp, totalSize=48.7 K 2024-11-20T13:24:24,451 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 21de38980cd14e11803bbac37a511004, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732109056482 2024-11-20T13:24:24,452 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 051fe4b1be4949bd94aa649945edced9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1732109057154 2024-11-20T13:24:24,452 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 18a565df8a3742519c1b4585d6bbef02, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732109059365 2024-11-20T13:24:24,452 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting e210cd2cc28c4a2d9fe537e5221eb589, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=344, earliestPutTs=1732109061509 2024-11-20T13:24:24,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742058_1234 (size=4469) 2024-11-20T13:24:24,463 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#C#compaction#194 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:24,464 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/9f5e32661dae4429ba466aa34d68861d is 50, key is test_row_0/C:col10/1732109061509/Put/seqid=0 2024-11-20T13:24:24,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742059_1235 (size=13085) 2024-11-20T13:24:24,473 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/9f5e32661dae4429ba466aa34d68861d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/9f5e32661dae4429ba466aa34d68861d 2024-11-20T13:24:24,477 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/C of bb1c53ade43f12e473cc15132f34b609 into 9f5e32661dae4429ba466aa34d68861d(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:24:24,477 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:24,477 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/C, priority=12, startTime=1732109064416; duration=0sec 2024-11-20T13:24:24,478 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:24,478 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:C 2024-11-20T13:24:24,855 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb1c53ade43f12e473cc15132f34b609#A#compaction#193 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:24,856 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/5b21fac71b0a4e339f6b36c433d3a0e7 is 175, key is test_row_0/A:col10/1732109061509/Put/seqid=0 2024-11-20T13:24:24,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742060_1236 (size=32039) 2024-11-20T13:24:25,266 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/5b21fac71b0a4e339f6b36c433d3a0e7 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/5b21fac71b0a4e339f6b36c433d3a0e7 2024-11-20T13:24:25,271 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bb1c53ade43f12e473cc15132f34b609/A of bb1c53ade43f12e473cc15132f34b609 into 5b21fac71b0a4e339f6b36c433d3a0e7(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:24:25,271 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:25,271 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609., storeName=bb1c53ade43f12e473cc15132f34b609/A, priority=12, startTime=1732109064416; duration=0sec 2024-11-20T13:24:25,271 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:25,271 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb1c53ade43f12e473cc15132f34b609:A 2024-11-20T13:24:26,133 DEBUG [Thread-690 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x554f4590 to 127.0.0.1:53074 2024-11-20T13:24:26,133 DEBUG [Thread-690 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:24:29,470 DEBUG [master/5ef453f0fbb6:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 2b8ebee7bfda350373a6614eb33b4fd3 changed from -1.0 to 0.0, refreshing cache 2024-11-20T13:24:32,256 DEBUG [Thread-682 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x21c6f06d to 127.0.0.1:53074 2024-11-20T13:24:32,256 DEBUG [Thread-682 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:24:32,257 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T13:24:32,257 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 46 2024-11-20T13:24:32,257 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 12 2024-11-20T13:24:32,257 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 99 2024-11-20T13:24:32,257 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 96 2024-11-20T13:24:32,257 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 21 2024-11-20T13:24:32,257 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T13:24:32,257 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3127 2024-11-20T13:24:32,257 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3146 2024-11-20T13:24:32,257 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T13:24:32,257 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1340 2024-11-20T13:24:32,257 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4020 rows 2024-11-20T13:24:32,257 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1335 2024-11-20T13:24:32,257 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4005 rows 2024-11-20T13:24:32,257 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T13:24:32,257 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x509dd4f9 to 127.0.0.1:53074 2024-11-20T13:24:32,257 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:24:32,264 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T13:24:32,268 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T13:24:32,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T13:24:32,272 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109072271"}]},"ts":"1732109072271"} 2024-11-20T13:24:32,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T13:24:32,274 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T13:24:32,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T13:24:32,508 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T13:24:32,509 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T13:24:32,511 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bb1c53ade43f12e473cc15132f34b609, UNASSIGN}] 2024-11-20T13:24:32,512 INFO 
[PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bb1c53ade43f12e473cc15132f34b609, UNASSIGN 2024-11-20T13:24:32,512 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=bb1c53ade43f12e473cc15132f34b609, regionState=CLOSING, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:32,514 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T13:24:32,514 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; CloseRegionProcedure bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137}] 2024-11-20T13:24:32,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T13:24:32,666 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:32,666 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] handler.UnassignRegionHandler(124): Close bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:32,667 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T13:24:32,667 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1681): Closing bb1c53ade43f12e473cc15132f34b609, disabling compactions & flushes 2024-11-20T13:24:32,667 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:32,667 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 2024-11-20T13:24:32,667 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. after waiting 0 ms 2024-11-20T13:24:32,667 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:32,667 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(2837): Flushing bb1c53ade43f12e473cc15132f34b609 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T13:24:32,667 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=A 2024-11-20T13:24:32,667 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:32,668 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=B 2024-11-20T13:24:32,668 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:32,668 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb1c53ade43f12e473cc15132f34b609, store=C 2024-11-20T13:24:32,668 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:32,677 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208bec7bbae698449e87082483d60f4384_bb1c53ade43f12e473cc15132f34b609 is 50, key is test_row_0/A:col10/1732109063260/Put/seqid=0 2024-11-20T13:24:32,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742061_1237 (size=12454) 2024-11-20T13:24:32,691 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:32,699 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208bec7bbae698449e87082483d60f4384_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208bec7bbae698449e87082483d60f4384_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:32,700 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/35bde3e9cbba4cf1ade82831d5f8ecfd, store: [table=TestAcidGuarantees family=A region=bb1c53ade43f12e473cc15132f34b609] 2024-11-20T13:24:32,701 DEBUG 
[RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/35bde3e9cbba4cf1ade82831d5f8ecfd is 175, key is test_row_0/A:col10/1732109063260/Put/seqid=0 2024-11-20T13:24:32,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742062_1238 (size=31255) 2024-11-20T13:24:32,711 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=354, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/35bde3e9cbba4cf1ade82831d5f8ecfd 2024-11-20T13:24:32,722 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/c4cee7d4e712456fbf3603bd84459489 is 50, key is test_row_0/B:col10/1732109063260/Put/seqid=0 2024-11-20T13:24:32,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742063_1239 (size=12301) 2024-11-20T13:24:32,732 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/c4cee7d4e712456fbf3603bd84459489 2024-11-20T13:24:32,762 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/8216c61cc34e49c689de42d1503d0f76 is 50, key is test_row_0/C:col10/1732109063260/Put/seqid=0 2024-11-20T13:24:32,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742064_1240 (size=12301) 2024-11-20T13:24:32,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T13:24:33,172 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/8216c61cc34e49c689de42d1503d0f76 2024-11-20T13:24:33,178 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/A/35bde3e9cbba4cf1ade82831d5f8ecfd as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/35bde3e9cbba4cf1ade82831d5f8ecfd 2024-11-20T13:24:33,183 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/35bde3e9cbba4cf1ade82831d5f8ecfd, entries=150, sequenceid=354, filesize=30.5 K 2024-11-20T13:24:33,185 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/B/c4cee7d4e712456fbf3603bd84459489 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/c4cee7d4e712456fbf3603bd84459489 2024-11-20T13:24:33,189 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/c4cee7d4e712456fbf3603bd84459489, entries=150, sequenceid=354, filesize=12.0 K 2024-11-20T13:24:33,191 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/.tmp/C/8216c61cc34e49c689de42d1503d0f76 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8216c61cc34e49c689de42d1503d0f76 2024-11-20T13:24:33,196 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8216c61cc34e49c689de42d1503d0f76, entries=150, sequenceid=354, filesize=12.0 K 2024-11-20T13:24:33,198 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for bb1c53ade43f12e473cc15132f34b609 in 530ms, sequenceid=354, compaction requested=false 2024-11-20T13:24:33,198 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/5212d0a138c1416599c0a07a0f36d8ec, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/56ff408a3eb34472aa10d74ef3985902, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/67f1812bf7c34800807de799425297b6, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/bae77b3a9a6b4e0f8fbcb704e3af427a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/6f9ccaadd26a4f468c10534e7123287c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/96fd4d9ac58e47098c2b69d86ec9786e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/08b69f80e3484befbad9ff922828339f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/93693d780742415bb3a382cb78a5df77, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/263e769281a145b09771bf9f6d415bf1, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/295d6df7ee79475e9a1caadc2b4e9d7a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/72ba206b1ba14500887d13f607f97606, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/52e206a5338943bdb6b1b3d5e78b75fe, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/5935fb252ea64a0eaacf7cd971fd1741, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/cba90771b4464fb4a09433c1aa169a7c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/a8fe3b64db7e412f9fb4bd1fc5f1a8b1, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/1e973448898a44308bd73f85dd265668, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/4f9185609ede41b2b8e6e3e17950315b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/a1447da1960546fba1b6ac4f7e45d0d4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/b47a69e3361141cfb1bc9610ba6e28aa, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/abdf41c406f3424bb43ebcc8fb28c0fd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/f88850a2a0d8445b9eec3c1e9e0f18a5, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/dd33040f92964c218c2c6709521c3486, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/9403141fe34341738000f1ec1748385e] to archive 2024-11-20T13:24:33,200 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T13:24:33,202 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/5212d0a138c1416599c0a07a0f36d8ec to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/5212d0a138c1416599c0a07a0f36d8ec 2024-11-20T13:24:33,204 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/56ff408a3eb34472aa10d74ef3985902 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/56ff408a3eb34472aa10d74ef3985902 2024-11-20T13:24:33,206 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/67f1812bf7c34800807de799425297b6 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/67f1812bf7c34800807de799425297b6 2024-11-20T13:24:33,208 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/bae77b3a9a6b4e0f8fbcb704e3af427a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/bae77b3a9a6b4e0f8fbcb704e3af427a 2024-11-20T13:24:33,210 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/6f9ccaadd26a4f468c10534e7123287c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/6f9ccaadd26a4f468c10534e7123287c 2024-11-20T13:24:33,211 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/96fd4d9ac58e47098c2b69d86ec9786e to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/96fd4d9ac58e47098c2b69d86ec9786e 2024-11-20T13:24:33,213 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/08b69f80e3484befbad9ff922828339f to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/08b69f80e3484befbad9ff922828339f 2024-11-20T13:24:33,214 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/93693d780742415bb3a382cb78a5df77 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/93693d780742415bb3a382cb78a5df77 2024-11-20T13:24:33,216 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/263e769281a145b09771bf9f6d415bf1 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/263e769281a145b09771bf9f6d415bf1 2024-11-20T13:24:33,217 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/295d6df7ee79475e9a1caadc2b4e9d7a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/295d6df7ee79475e9a1caadc2b4e9d7a 2024-11-20T13:24:33,218 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/72ba206b1ba14500887d13f607f97606 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/72ba206b1ba14500887d13f607f97606 2024-11-20T13:24:33,220 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/52e206a5338943bdb6b1b3d5e78b75fe to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/52e206a5338943bdb6b1b3d5e78b75fe 2024-11-20T13:24:33,222 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/5935fb252ea64a0eaacf7cd971fd1741 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/5935fb252ea64a0eaacf7cd971fd1741 2024-11-20T13:24:33,223 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/cba90771b4464fb4a09433c1aa169a7c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/cba90771b4464fb4a09433c1aa169a7c 2024-11-20T13:24:33,225 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/a8fe3b64db7e412f9fb4bd1fc5f1a8b1 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/a8fe3b64db7e412f9fb4bd1fc5f1a8b1 2024-11-20T13:24:33,228 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/1e973448898a44308bd73f85dd265668 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/1e973448898a44308bd73f85dd265668 2024-11-20T13:24:33,230 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/4f9185609ede41b2b8e6e3e17950315b to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/4f9185609ede41b2b8e6e3e17950315b 2024-11-20T13:24:33,231 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/a1447da1960546fba1b6ac4f7e45d0d4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/a1447da1960546fba1b6ac4f7e45d0d4 2024-11-20T13:24:33,233 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/b47a69e3361141cfb1bc9610ba6e28aa to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/b47a69e3361141cfb1bc9610ba6e28aa 2024-11-20T13:24:33,235 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/abdf41c406f3424bb43ebcc8fb28c0fd to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/abdf41c406f3424bb43ebcc8fb28c0fd 2024-11-20T13:24:33,236 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/f88850a2a0d8445b9eec3c1e9e0f18a5 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/f88850a2a0d8445b9eec3c1e9e0f18a5 2024-11-20T13:24:33,238 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/dd33040f92964c218c2c6709521c3486 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/dd33040f92964c218c2c6709521c3486 2024-11-20T13:24:33,239 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/9403141fe34341738000f1ec1748385e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/9403141fe34341738000f1ec1748385e 2024-11-20T13:24:33,241 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/1f2042f4c2fe44a6a0ee8a6047b56b7a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/51c9d383682c492fa8f3e0ebb01e9ee8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/5b67c324c30342359a0de04b293bc8dd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/56eb81d9cd524f0898dd99d22c16cbb1, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/a9d3fbd3dfc84eb883c64ee074355ebd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/15ceb5620a204449836074b89be37d61, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/f9daf1715e1d4db9876e0a277492a860, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/ddf22ec5b0604a11ab49e3ddb14cd6db, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/28145851c292481889972c3b2d7427ea, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/9e6294bf83c949c7baf563b1fe3b9c28, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/87501b3a48a64cb08eb15dfe1a6875a2, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/a527268b231b4d0b8e47615259ce9adf, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/dd0f0b26fcf140e88781045cbf1b0377, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/0979c0beb64b4b8ab642113dc82a457f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/66f870266198406f8e25c30238972a31, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/9202ee7524f44ee1b23fd55aed8d98f7, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/c0f368cdab1c4291bc1da12ce8c2fe72, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/55bfdb26c49b400b9ae888be2a7d81ce, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/65b1e9c58d9742628b89f93c7458588b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/c3a8c69ff6e24da9b78c2abb2059b516, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/467955aa4e4b4283994f5a2ff19037eb, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/573add6e68f14434b583c8bd12689706, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/a78f9dcb156845b4b1458a7e0314caef] to archive 2024-11-20T13:24:33,242 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T13:24:33,244 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/1f2042f4c2fe44a6a0ee8a6047b56b7a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/1f2042f4c2fe44a6a0ee8a6047b56b7a 2024-11-20T13:24:33,246 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/51c9d383682c492fa8f3e0ebb01e9ee8 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/51c9d383682c492fa8f3e0ebb01e9ee8 2024-11-20T13:24:33,247 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/5b67c324c30342359a0de04b293bc8dd to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/5b67c324c30342359a0de04b293bc8dd 2024-11-20T13:24:33,249 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/56eb81d9cd524f0898dd99d22c16cbb1 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/56eb81d9cd524f0898dd99d22c16cbb1 2024-11-20T13:24:33,250 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/a9d3fbd3dfc84eb883c64ee074355ebd to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/a9d3fbd3dfc84eb883c64ee074355ebd 2024-11-20T13:24:33,252 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/15ceb5620a204449836074b89be37d61 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/15ceb5620a204449836074b89be37d61 2024-11-20T13:24:33,254 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/f9daf1715e1d4db9876e0a277492a860 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/f9daf1715e1d4db9876e0a277492a860 2024-11-20T13:24:33,255 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/ddf22ec5b0604a11ab49e3ddb14cd6db to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/ddf22ec5b0604a11ab49e3ddb14cd6db 2024-11-20T13:24:33,257 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/28145851c292481889972c3b2d7427ea to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/28145851c292481889972c3b2d7427ea 2024-11-20T13:24:33,258 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/9e6294bf83c949c7baf563b1fe3b9c28 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/9e6294bf83c949c7baf563b1fe3b9c28 2024-11-20T13:24:33,260 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/87501b3a48a64cb08eb15dfe1a6875a2 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/87501b3a48a64cb08eb15dfe1a6875a2 2024-11-20T13:24:33,261 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/a527268b231b4d0b8e47615259ce9adf to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/a527268b231b4d0b8e47615259ce9adf 2024-11-20T13:24:33,263 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/dd0f0b26fcf140e88781045cbf1b0377 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/dd0f0b26fcf140e88781045cbf1b0377 2024-11-20T13:24:33,264 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/0979c0beb64b4b8ab642113dc82a457f to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/0979c0beb64b4b8ab642113dc82a457f 2024-11-20T13:24:33,266 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/66f870266198406f8e25c30238972a31 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/66f870266198406f8e25c30238972a31 2024-11-20T13:24:33,268 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/9202ee7524f44ee1b23fd55aed8d98f7 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/9202ee7524f44ee1b23fd55aed8d98f7 2024-11-20T13:24:33,269 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/c0f368cdab1c4291bc1da12ce8c2fe72 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/c0f368cdab1c4291bc1da12ce8c2fe72 2024-11-20T13:24:33,271 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/55bfdb26c49b400b9ae888be2a7d81ce to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/55bfdb26c49b400b9ae888be2a7d81ce 2024-11-20T13:24:33,272 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/65b1e9c58d9742628b89f93c7458588b to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/65b1e9c58d9742628b89f93c7458588b 2024-11-20T13:24:33,274 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/c3a8c69ff6e24da9b78c2abb2059b516 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/c3a8c69ff6e24da9b78c2abb2059b516 2024-11-20T13:24:33,276 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/467955aa4e4b4283994f5a2ff19037eb to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/467955aa4e4b4283994f5a2ff19037eb 2024-11-20T13:24:33,277 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/573add6e68f14434b583c8bd12689706 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/573add6e68f14434b583c8bd12689706 2024-11-20T13:24:33,279 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/a78f9dcb156845b4b1458a7e0314caef to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/a78f9dcb156845b4b1458a7e0314caef 2024-11-20T13:24:33,280 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/45284dcc76864d94ad428872f890456a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/c0bba01cc4cb4537a826ecd66d6566b4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/a8e50c0753274b8787c5a4d30f3f15d4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/393e83c501fc4fb188c1d8a8ee1e5145, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/d6dd816ac85e4d13baf6cfdc60e2e5f4, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/f3cfab69a7b840aeba9d7d3437eca4bc, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/cc17287eaf8c48e9b2cd8721037ab1a0, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/e2737301045444be96b9fc3504854d63, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/6c3d9f7e285d41c0abdabda54720c350, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/babe360a2e7242c28c06f7f684aff579, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8705ae741b734b788a49a2082d34ae7c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8a3a3440395a42dda8902aa4f97aa3a2, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/b37f6856524e40fb9293d20d3c6b0451, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8494a7f7d6f9437d856cef1960ae4d7d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/76c1e28b24d245aebc5a95804926037f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/7060f4fc890e4433ad8fd853219d60ea, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/736793bcbbfd4472818ce33100117af8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/81cf0dc0e85f4cc490a554bc2927b88d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/21de38980cd14e11803bbac37a511004, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/2d98b9e6b9d5480ab1fefbbd8feb2898, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/051fe4b1be4949bd94aa649945edced9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/18a565df8a3742519c1b4585d6bbef02, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/e210cd2cc28c4a2d9fe537e5221eb589] to archive 2024-11-20T13:24:33,281 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T13:24:33,283 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/45284dcc76864d94ad428872f890456a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/45284dcc76864d94ad428872f890456a 2024-11-20T13:24:33,286 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/c0bba01cc4cb4537a826ecd66d6566b4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/c0bba01cc4cb4537a826ecd66d6566b4 2024-11-20T13:24:33,287 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/a8e50c0753274b8787c5a4d30f3f15d4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/a8e50c0753274b8787c5a4d30f3f15d4 2024-11-20T13:24:33,288 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/393e83c501fc4fb188c1d8a8ee1e5145 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/393e83c501fc4fb188c1d8a8ee1e5145 2024-11-20T13:24:33,290 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/d6dd816ac85e4d13baf6cfdc60e2e5f4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/d6dd816ac85e4d13baf6cfdc60e2e5f4 2024-11-20T13:24:33,292 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/f3cfab69a7b840aeba9d7d3437eca4bc to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/f3cfab69a7b840aeba9d7d3437eca4bc 2024-11-20T13:24:33,293 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/cc17287eaf8c48e9b2cd8721037ab1a0 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/cc17287eaf8c48e9b2cd8721037ab1a0 2024-11-20T13:24:33,295 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/e2737301045444be96b9fc3504854d63 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/e2737301045444be96b9fc3504854d63 2024-11-20T13:24:33,296 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/6c3d9f7e285d41c0abdabda54720c350 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/6c3d9f7e285d41c0abdabda54720c350 2024-11-20T13:24:33,298 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/babe360a2e7242c28c06f7f684aff579 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/babe360a2e7242c28c06f7f684aff579 2024-11-20T13:24:33,299 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8705ae741b734b788a49a2082d34ae7c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8705ae741b734b788a49a2082d34ae7c 2024-11-20T13:24:33,301 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8a3a3440395a42dda8902aa4f97aa3a2 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8a3a3440395a42dda8902aa4f97aa3a2 2024-11-20T13:24:33,302 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/b37f6856524e40fb9293d20d3c6b0451 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/b37f6856524e40fb9293d20d3c6b0451 2024-11-20T13:24:33,303 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8494a7f7d6f9437d856cef1960ae4d7d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8494a7f7d6f9437d856cef1960ae4d7d 2024-11-20T13:24:33,305 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/76c1e28b24d245aebc5a95804926037f to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/76c1e28b24d245aebc5a95804926037f 2024-11-20T13:24:33,307 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/7060f4fc890e4433ad8fd853219d60ea to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/7060f4fc890e4433ad8fd853219d60ea 2024-11-20T13:24:33,308 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/736793bcbbfd4472818ce33100117af8 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/736793bcbbfd4472818ce33100117af8 2024-11-20T13:24:33,310 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/81cf0dc0e85f4cc490a554bc2927b88d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/81cf0dc0e85f4cc490a554bc2927b88d 2024-11-20T13:24:33,311 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/21de38980cd14e11803bbac37a511004 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/21de38980cd14e11803bbac37a511004 2024-11-20T13:24:33,313 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/2d98b9e6b9d5480ab1fefbbd8feb2898 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/2d98b9e6b9d5480ab1fefbbd8feb2898 2024-11-20T13:24:33,314 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/051fe4b1be4949bd94aa649945edced9 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/051fe4b1be4949bd94aa649945edced9 2024-11-20T13:24:33,316 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/18a565df8a3742519c1b4585d6bbef02 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/18a565df8a3742519c1b4585d6bbef02 2024-11-20T13:24:33,317 DEBUG [StoreCloser-TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/e210cd2cc28c4a2d9fe537e5221eb589 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/e210cd2cc28c4a2d9fe537e5221eb589 2024-11-20T13:24:33,323 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/recovered.edits/357.seqid, newMaxSeqId=357, maxSeqId=4 2024-11-20T13:24:33,324 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609. 
2024-11-20T13:24:33,324 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1635): Region close journal for bb1c53ade43f12e473cc15132f34b609: 2024-11-20T13:24:33,326 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] handler.UnassignRegionHandler(170): Closed bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:33,327 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=bb1c53ade43f12e473cc15132f34b609, regionState=CLOSED 2024-11-20T13:24:33,329 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-11-20T13:24:33,329 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; CloseRegionProcedure bb1c53ade43f12e473cc15132f34b609, server=5ef453f0fbb6,46739,1732109006137 in 814 msec 2024-11-20T13:24:33,331 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=66 2024-11-20T13:24:33,331 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=66, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=bb1c53ade43f12e473cc15132f34b609, UNASSIGN in 818 msec 2024-11-20T13:24:33,333 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-20T13:24:33,333 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 823 msec 2024-11-20T13:24:33,335 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109073334"}]},"ts":"1732109073334"} 2024-11-20T13:24:33,336 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T13:24:33,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T13:24:33,464 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T13:24:33,472 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.1970 sec 2024-11-20T13:24:34,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T13:24:34,379 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-11-20T13:24:34,379 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T13:24:34,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:24:34,381 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=69, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:24:34,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=69 2024-11-20T13:24:34,382 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=69, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:24:34,384 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:34,387 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A, FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B, FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C, FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/recovered.edits] 2024-11-20T13:24:34,390 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/35bde3e9cbba4cf1ade82831d5f8ecfd to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/35bde3e9cbba4cf1ade82831d5f8ecfd 2024-11-20T13:24:34,392 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/5b21fac71b0a4e339f6b36c433d3a0e7 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/A/5b21fac71b0a4e339f6b36c433d3a0e7 2024-11-20T13:24:34,395 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/0c4e97f2875c4348b4be0c76a5a35347 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/0c4e97f2875c4348b4be0c76a5a35347 2024-11-20T13:24:34,396 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/c4cee7d4e712456fbf3603bd84459489 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/B/c4cee7d4e712456fbf3603bd84459489 2024-11-20T13:24:34,399 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8216c61cc34e49c689de42d1503d0f76 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/8216c61cc34e49c689de42d1503d0f76 2024-11-20T13:24:34,401 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/9f5e32661dae4429ba466aa34d68861d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/C/9f5e32661dae4429ba466aa34d68861d 2024-11-20T13:24:34,404 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/recovered.edits/357.seqid to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609/recovered.edits/357.seqid 2024-11-20T13:24:34,405 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:34,405 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T13:24:34,405 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T13:24:34,406 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T13:24:34,411 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112001cd2cb43a04407cb994cffc440bd2a9_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112001cd2cb43a04407cb994cffc440bd2a9_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:34,413 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112004aa1bcc6ca14ac8b5f20e356dffa139_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112004aa1bcc6ca14ac8b5f20e356dffa139_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:34,414 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201b444fef5e33452db8d485d14f260937_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201b444fef5e33452db8d485d14f260937_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:34,416 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203f7b5e8217224783ba89d3db13e5478a_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203f7b5e8217224783ba89d3db13e5478a_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:34,417 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120575ab2b54c4d4526a43fff8a990a7743_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120575ab2b54c4d4526a43fff8a990a7743_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:34,418 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120602fca6d2b58497dac32b35a72538266_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120602fca6d2b58497dac32b35a72538266_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:34,420 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120656a2192815f4746ae095632c3d91abf_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120656a2192815f4746ae095632c3d91abf_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:34,421 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207c82661964bd4f878d0787491a166374_bb1c53ade43f12e473cc15132f34b609 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207c82661964bd4f878d0787491a166374_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:34,423 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207d5e9acc5fca43f99aa0506a4762ef21_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207d5e9acc5fca43f99aa0506a4762ef21_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:34,424 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207f6698f8c92344e79c9308f62046359d_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207f6698f8c92344e79c9308f62046359d_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:34,425 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208bec7bbae698449e87082483d60f4384_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208bec7bbae698449e87082483d60f4384_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:34,427 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120919223cbd05f41a8b2294daa62a108d0_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120919223cbd05f41a8b2294daa62a108d0_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:34,428 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209779dc3a44e84dda9428ce837df485c3_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209779dc3a44e84dda9428ce837df485c3_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:34,429 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a298e74f6ece49ee9aadb9e3256b47ed_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a298e74f6ece49ee9aadb9e3256b47ed_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:34,431 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e9badc8aa8364fd69d345f16697defc9_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e9badc8aa8364fd69d345f16697defc9_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:34,432 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f9a79a30d3bb48c7886723bed7688531_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f9a79a30d3bb48c7886723bed7688531_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:34,434 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fba122482ca74759b30b8ee40d6de440_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fba122482ca74759b30b8ee40d6de440_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:34,436 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fd0e8cef9518479f9c7b58bb1f9cd3ca_bb1c53ade43f12e473cc15132f34b609 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fd0e8cef9518479f9c7b58bb1f9cd3ca_bb1c53ade43f12e473cc15132f34b609 2024-11-20T13:24:34,437 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T13:24:34,439 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=69, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:24:34,443 WARN 
[PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T13:24:34,446 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T13:24:34,447 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=69, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:24:34,447 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T13:24:34,447 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732109074447"}]},"ts":"9223372036854775807"} 2024-11-20T13:24:34,454 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T13:24:34,454 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => bb1c53ade43f12e473cc15132f34b609, NAME => 'TestAcidGuarantees,,1732109038487.bb1c53ade43f12e473cc15132f34b609.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T13:24:34,454 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T13:24:34,454 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732109074454"}]},"ts":"9223372036854775807"} 2024-11-20T13:24:34,456 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T13:24:34,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T13:24:34,618 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=69, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:24:34,619 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 238 msec 2024-11-20T13:24:34,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T13:24:34,685 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-11-20T13:24:34,698 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=237 (was 239), OpenFileDescriptor=451 (was 460), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1066 (was 1156), ProcessCount=11 (was 11), AvailableMemoryMB=543 (was 883) 2024-11-20T13:24:34,709 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=237, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=1066, ProcessCount=11, AvailableMemoryMB=542 2024-11-20T13:24:34,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
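
The TableDescriptorChecker WARN above fires because the test table's MEMSTORE_FLUSHSIZE (131072 bytes, i.e. 128 KB) sits far below the 128 MB default of hbase.hregion.memstore.flush.size. A minimal sketch of a descriptor that would trip the same check, assuming a plain HBase 2.x client API; the class name and the single family are illustrative, not taken from this log:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class SmallFlushSizeDescriptor {
  // Builds a descriptor whose per-table flush size is deliberately tiny (128 KB),
  // which is what makes TableDescriptorChecker log the "too small" warning on create.
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setMemStoreFlushSize(131072L)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("A")))
        .build();
  }
}
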
2024-11-20T13:24:34,711 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T13:24:34,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=70, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T13:24:34,713 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T13:24:34,713 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:34,713 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 70 2024-11-20T13:24:34,714 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T13:24:34,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-11-20T13:24:34,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742065_1241 (size=963) 2024-11-20T13:24:34,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-11-20T13:24:35,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-11-20T13:24:35,123 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc 2024-11-20T13:24:35,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742066_1242 (size=53) 2024-11-20T13:24:35,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-11-20T13:24:35,552 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:24:35,552 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 7b01f50ebe5529b12faadacb91472f69, disabling compactions & flushes 2024-11-20T13:24:35,552 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:35,552 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:35,553 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. after waiting 0 ms 2024-11-20T13:24:35,553 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:35,553 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
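
The shell-style descriptor the master logs for this create request maps onto the Java Admin API roughly as sketched below, assuming a standard HBase 2.x client. The connection setup and the main() wrapper are assumptions; the table name, the ADAPTIVE compacting-memstore metadata key, and the three single-version, ROW-bloom families A, B and C come from the logged request:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestAcidGuarantees {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder builder =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // table-level metadata seen in the logged create call
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        builder.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)                 // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                .setBlocksize(65536)               // BLOCKSIZE => '65536'
                .build());
      }
      admin.createTable(builder.build());
    }
  }
}
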
2024-11-20T13:24:35,553 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:35,560 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T13:24:35,560 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732109075560"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732109075560"}]},"ts":"1732109075560"} 2024-11-20T13:24:35,565 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T13:24:35,566 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T13:24:35,566 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109075566"}]},"ts":"1732109075566"} 2024-11-20T13:24:35,567 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T13:24:35,751 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T13:24:35,756 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b01f50ebe5529b12faadacb91472f69, ASSIGN}] 2024-11-20T13:24:35,758 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b01f50ebe5529b12faadacb91472f69, ASSIGN 2024-11-20T13:24:35,759 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b01f50ebe5529b12faadacb91472f69, ASSIGN; state=OFFLINE, location=5ef453f0fbb6,46739,1732109006137; forceNewPlan=false, retain=false 2024-11-20T13:24:35,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-11-20T13:24:35,912 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=71 updating hbase:meta row=7b01f50ebe5529b12faadacb91472f69, regionState=OPENING, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:35,928 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; OpenRegionProcedure 7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137}] 2024-11-20T13:24:36,081 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:36,093 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] handler.AssignRegionHandler(135): 
Open TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:36,093 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(7285): Opening region: {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} 2024-11-20T13:24:36,093 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:36,093 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:24:36,094 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(7327): checking encryption for 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:36,094 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(7330): checking classloading for 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:36,112 INFO [StoreOpener-7b01f50ebe5529b12faadacb91472f69-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:36,136 INFO [StoreOpener-7b01f50ebe5529b12faadacb91472f69-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:24:36,136 INFO [StoreOpener-7b01f50ebe5529b12faadacb91472f69-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7b01f50ebe5529b12faadacb91472f69 columnFamilyName A 2024-11-20T13:24:36,137 DEBUG [StoreOpener-7b01f50ebe5529b12faadacb91472f69-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:36,144 INFO [StoreOpener-7b01f50ebe5529b12faadacb91472f69-1 {}] regionserver.HStore(327): Store=7b01f50ebe5529b12faadacb91472f69/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:24:36,144 INFO [StoreOpener-7b01f50ebe5529b12faadacb91472f69-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:36,160 INFO [StoreOpener-7b01f50ebe5529b12faadacb91472f69-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:24:36,160 INFO [StoreOpener-7b01f50ebe5529b12faadacb91472f69-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7b01f50ebe5529b12faadacb91472f69 columnFamilyName B 2024-11-20T13:24:36,161 DEBUG [StoreOpener-7b01f50ebe5529b12faadacb91472f69-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:36,172 INFO [StoreOpener-7b01f50ebe5529b12faadacb91472f69-1 {}] regionserver.HStore(327): Store=7b01f50ebe5529b12faadacb91472f69/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:24:36,172 INFO [StoreOpener-7b01f50ebe5529b12faadacb91472f69-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:36,184 INFO [StoreOpener-7b01f50ebe5529b12faadacb91472f69-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:24:36,185 INFO [StoreOpener-7b01f50ebe5529b12faadacb91472f69-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7b01f50ebe5529b12faadacb91472f69 columnFamilyName C 2024-11-20T13:24:36,185 DEBUG [StoreOpener-7b01f50ebe5529b12faadacb91472f69-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:24:36,192 INFO [StoreOpener-7b01f50ebe5529b12faadacb91472f69-1 {}] 
regionserver.HStore(327): Store=7b01f50ebe5529b12faadacb91472f69/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:24:36,192 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:36,200 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:36,208 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:36,232 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T13:24:36,252 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(1085): writing seq id for 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:36,300 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T13:24:36,316 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(1102): Opened 7b01f50ebe5529b12faadacb91472f69; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71002968, jitterRate=0.058026671409606934}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T13:24:36,317 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(1001): Region open journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:36,320 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., pid=72, masterSystemTime=1732109076081 2024-11-20T13:24:36,322 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:36,322 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
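
Each StoreOpener line above reports compactor=ADAPTIVE because the table-level metadata key hbase.hregion.compacting.memstore.type was set to ADAPTIVE at create time. A short sketch, assuming the standard HBase 2.x client API, of the equivalent per-column-family way to request the same in-memory compaction policy; the helper name is illustrative:

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AdaptiveFamilyDescriptor {
  // Returns a family descriptor whose memstore uses the ADAPTIVE in-memory
  // compaction policy, matching the compactor=ADAPTIVE reported by CompactingMemStore.
  public static ColumnFamilyDescriptor adaptiveFamily(String name) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .build();
  }
}
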
2024-11-20T13:24:36,322 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=71 updating hbase:meta row=7b01f50ebe5529b12faadacb91472f69, regionState=OPEN, openSeqNum=2, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:36,327 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-20T13:24:36,327 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; OpenRegionProcedure 7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 in 397 msec 2024-11-20T13:24:36,332 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=71, resume processing ppid=70 2024-11-20T13:24:36,333 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, ppid=70, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b01f50ebe5529b12faadacb91472f69, ASSIGN in 571 msec 2024-11-20T13:24:36,334 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T13:24:36,334 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109076334"}]},"ts":"1732109076334"} 2024-11-20T13:24:36,336 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T13:24:36,345 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T13:24:36,346 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.6340 sec 2024-11-20T13:24:36,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-11-20T13:24:36,826 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 70 completed 2024-11-20T13:24:36,828 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x65d62a07 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62417a4 2024-11-20T13:24:36,852 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ab47791, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:36,856 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:36,861 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54046, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:36,864 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T13:24:36,869 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47874, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T13:24:36,875 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x24414caa to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2fee6b1 2024-11-20T13:24:36,924 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2bdf1180, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:36,926 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6b1dacfc to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1e231440 2024-11-20T13:24:36,960 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4da35515, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:36,962 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x32d6d0a0 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@345153ae 2024-11-20T13:24:36,996 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@638db3cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:36,998 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0417da98 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3ae52597 2024-11-20T13:24:37,044 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34f8f2d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:37,046 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0c8c7dae to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1fd62835 2024-11-20T13:24:37,078 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ccb0749, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:37,080 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3685bc77 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c004a1 2024-11-20T13:24:37,087 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6347b8b3, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:37,089 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x05bd3f35 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7dac62f 2024-11-20T13:24:37,125 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59066447, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:37,126 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2906ae40 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@783ceef1 2024-11-20T13:24:37,160 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@127299ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:37,162 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38280ccf to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@27fc265d 2024-11-20T13:24:37,173 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21c77ebe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:37,174 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7b868ff8 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d0e901b 2024-11-20T13:24:37,197 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27263b8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:24:37,215 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:24:37,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-20T13:24:37,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T13:24:37,220 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:24:37,221 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:24:37,221 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:24:37,245 DEBUG [hconnection-0x4fade4a3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:37,247 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54058, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:37,256 DEBUG [hconnection-0x4f350d04-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:37,258 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54070, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:37,260 DEBUG [hconnection-0x84b5fec-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:37,261 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54072, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:37,267 DEBUG [hconnection-0x6b15d3f7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:37,268 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54074, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:37,276 DEBUG [hconnection-0x2f95f9bb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:37,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:37,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T13:24:37,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:37,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:37,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:37,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:37,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:37,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:37,280 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54082, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:37,300 DEBUG 
[hconnection-0x67cb09ae-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:37,301 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54088, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:37,302 DEBUG [hconnection-0x3e34ea1c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:37,304 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54098, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:37,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T13:24:37,319 DEBUG [hconnection-0x51eb3487-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:37,321 DEBUG [hconnection-0x7cee6286-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:37,321 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54108, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:37,322 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54122, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:37,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:37,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109137317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,326 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:37,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109137324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,327 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:37,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109137324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,327 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:37,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109137324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,329 DEBUG [hconnection-0x1b563d4b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:24:37,331 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54130, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:24:37,337 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:37,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109137332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,380 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,380 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T13:24:37,382 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/4cc9ebf4b16343408ab53f947961db1a is 50, key is test_row_0/A:col10/1732109077276/Put/seqid=0 2024-11-20T13:24:37,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:37,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:37,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:37,388 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:37,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:37,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:37,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:37,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109137427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:37,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109137428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:37,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109137428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:37,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109137436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:37,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742067_1243 (size=12001) 2024-11-20T13:24:37,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109137439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T13:24:37,543 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,543 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T13:24:37,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:37,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:37,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:37,544 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:37,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:37,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:37,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:37,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109137638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:37,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109137639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:37,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109137642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:37,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109137646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:37,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109137676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,700 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,700 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T13:24:37,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:37,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:37,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:37,701 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:37,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:37,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:37,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T13:24:37,856 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,857 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/4cc9ebf4b16343408ab53f947961db1a 2024-11-20T13:24:37,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T13:24:37,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:37,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:37,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:37,859 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:37,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:37,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:37,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:37,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109137945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:37,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109137945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:37,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109137951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:37,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109137953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:37,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109137983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:37,999 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/273bc95931874a0faf53d985d7a2d4ad is 50, key is test_row_0/B:col10/1732109077276/Put/seqid=0 2024-11-20T13:24:38,020 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:38,021 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T13:24:38,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:38,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:38,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:38,021 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:38,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:38,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:38,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742068_1244 (size=12001) 2024-11-20T13:24:38,084 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/273bc95931874a0faf53d985d7a2d4ad 2024-11-20T13:24:38,179 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:38,179 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T13:24:38,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:38,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:38,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:38,180 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:38,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:38,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:38,214 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/bc3dc8dee7cd4faebd803b5d1527423d is 50, key is test_row_0/C:col10/1732109077276/Put/seqid=0 2024-11-20T13:24:38,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742069_1245 (size=12001) 2024-11-20T13:24:38,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T13:24:38,333 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:38,333 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T13:24:38,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:38,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:38,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:38,334 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
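Between these retries a client keeps polling the master about the parent procedure (pid=73, "Checking to see if procedure is done"). Assuming the flush was requested through the standard client API (the log does not show how the test issued it), a table flush like this one can be triggered and awaited as follows; only the table name is taken from the log, the rest is a minimal sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Flush every region of the table; on the master this corresponds to a
                // FlushTableProcedure with one per-region sub-procedure, i.e. the
                // pid=73 / pid=74 pair seen in the log.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }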
2024-11-20T13:24:38,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:38,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:38,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:38,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109138454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:38,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:38,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109138459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:38,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:38,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109138460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:38,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:38,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109138468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:38,487 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:38,488 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T13:24:38,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:38,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:38,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:38,488 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:38,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:38,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:38,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:38,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109138498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:38,641 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:38,641 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T13:24:38,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:38,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
as already flushing 2024-11-20T13:24:38,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:38,642 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:38,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:38,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:38,697 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/bc3dc8dee7cd4faebd803b5d1527423d 2024-11-20T13:24:38,730 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/4cc9ebf4b16343408ab53f947961db1a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/4cc9ebf4b16343408ab53f947961db1a 2024-11-20T13:24:38,739 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/4cc9ebf4b16343408ab53f947961db1a, entries=150, sequenceid=12, filesize=11.7 K 2024-11-20T13:24:38,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/273bc95931874a0faf53d985d7a2d4ad as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/273bc95931874a0faf53d985d7a2d4ad 2024-11-20T13:24:38,776 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/273bc95931874a0faf53d985d7a2d4ad, entries=150, sequenceid=12, filesize=11.7 K 2024-11-20T13:24:38,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/bc3dc8dee7cd4faebd803b5d1527423d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/bc3dc8dee7cd4faebd803b5d1527423d 2024-11-20T13:24:38,794 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:38,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T13:24:38,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:38,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:38,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:38,797 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:38,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:38,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
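The RegionTooBusyException warnings above show client writes being rejected while the region's memstore is over its blocking limit of 512.0 K. That limit is the configured memstore flush size multiplied by the block multiplier; the production defaults are 128 MB and 4, so the test presumably runs with a much smaller flush size to provoke this path. A hedged sketch of the two settings involved (the 128 KB value below is an assumption, chosen only because 128 KB * 4 matches the 512 K limit in the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfig {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Memstore size that triggers a flush (default 128 MB; this tiny value is
            // only an assumption that would reproduce the 512 K limit in the log).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            // Once the memstore reaches flush.size * block.multiplier (default 4),
            // HRegion.checkResources rejects writes with RegionTooBusyException,
            // as seen in the stack traces above.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long limitKb = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4) / 1024;
            System.out.println("blocking limit = " + limitKb + " KB");
        }
    }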
2024-11-20T13:24:38,800 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/bc3dc8dee7cd4faebd803b5d1527423d, entries=150, sequenceid=12, filesize=11.7 K 2024-11-20T13:24:38,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 7b01f50ebe5529b12faadacb91472f69 in 1535ms, sequenceid=12, compaction requested=false 2024-11-20T13:24:38,812 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T13:24:38,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:38,926 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T13:24:38,955 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:38,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T13:24:38,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:38,957 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T13:24:38,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:38,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:38,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:38,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:38,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:38,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:38,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/ee9a09de85ce4dd896cfbd1eb806dcfd is 50, key is test_row_0/A:col10/1732109077321/Put/seqid=0 2024-11-20T13:24:39,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742070_1246 (size=12001) 2024-11-20T13:24:39,085 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/ee9a09de85ce4dd896cfbd1eb806dcfd 2024-11-20T13:24:39,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/46c903d43e4044278eee6147f126548f is 50, key is test_row_0/B:col10/1732109077321/Put/seqid=0 2024-11-20T13:24:39,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742071_1247 (size=12001) 2024-11-20T13:24:39,289 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/46c903d43e4044278eee6147f126548f 2024-11-20T13:24:39,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T13:24:39,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/4bd8652be3eb49efa4b9dcbd59282026 is 50, key is test_row_0/C:col10/1732109077321/Put/seqid=0 2024-11-20T13:24:39,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742072_1248 (size=12001) 2024-11-20T13:24:39,428 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/4bd8652be3eb49efa4b9dcbd59282026 2024-11-20T13:24:39,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
as already flushing 2024-11-20T13:24:39,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:39,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/ee9a09de85ce4dd896cfbd1eb806dcfd as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/ee9a09de85ce4dd896cfbd1eb806dcfd 2024-11-20T13:24:39,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:39,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109139481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:39,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:39,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109139494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:39,507 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/ee9a09de85ce4dd896cfbd1eb806dcfd, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T13:24:39,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:39,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109139501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:39,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/46c903d43e4044278eee6147f126548f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/46c903d43e4044278eee6147f126548f 2024-11-20T13:24:39,522 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:39,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109139514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:39,523 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:39,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109139514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:39,539 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/46c903d43e4044278eee6147f126548f, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T13:24:39,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/4bd8652be3eb49efa4b9dcbd59282026 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/4bd8652be3eb49efa4b9dcbd59282026 2024-11-20T13:24:39,569 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/4bd8652be3eb49efa4b9dcbd59282026, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T13:24:39,570 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 7b01f50ebe5529b12faadacb91472f69 in 613ms, sequenceid=38, compaction requested=false 2024-11-20T13:24:39,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:39,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
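The repeated RegionTooBusyException entries above are the region server refusing writes while the memstore of region 7b01f50ebe5529b12faadacb91472f69 is over its blocking limit (512.0 K here). That limit is roughly hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, and the very small value presumably comes from the test's own configuration rather than defaults (an assumption, not shown in this excerpt). A minimal client-side sketch of backing off and retrying such puts follows; the table name, row key, and column family are taken from the log, the backoff numbers are arbitrary, and the stock HBase client would normally retry a RegionTooBusyException on its own.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The "Over memstore limit=512.0 K" blocking threshold seen in the log is roughly
    // hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                      // arbitrary starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                        // the client itself also retries retriable errors
          break;
        } catch (RegionTooBusyException e) {     // region above its memstore blocking limit
          if (attempt == 5) {
            throw e;                             // give up after the last attempt
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2;                        // simple exponential backoff
        }
      }
    }
  }
}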
2024-11-20T13:24:39,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-20T13:24:39,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-20T13:24:39,573 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-20T13:24:39,574 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3510 sec 2024-11-20T13:24:39,576 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 2.3590 sec 2024-11-20T13:24:39,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:39,612 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T13:24:39,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:39,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:39,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:39,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:39,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:39,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:39,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/688963f8620349b9807cba11e35dc323 is 50, key is test_row_0/A:col10/1732109079485/Put/seqid=0 2024-11-20T13:24:39,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742073_1249 (size=14341) 2024-11-20T13:24:39,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:39,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109139717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:39,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:39,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109139718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:39,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:39,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109139719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:39,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:39,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109139719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:39,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:39,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109139835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:39,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:39,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109139836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:39,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:39,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109139824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:39,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:39,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109139830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:40,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:40,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:40,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109140084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:40,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109140081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:40,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:40,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109140089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:40,094 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/688963f8620349b9807cba11e35dc323 2024-11-20T13:24:40,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:40,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109140107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:40,155 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/1f8f692d57cd475bb3a6e270e5c519de is 50, key is test_row_0/B:col10/1732109079485/Put/seqid=0 2024-11-20T13:24:40,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742074_1250 (size=12001) 2024-11-20T13:24:40,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:40,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109140388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:40,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:40,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109140392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:40,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:40,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109140395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:40,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:40,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109140416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:40,596 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/1f8f692d57cd475bb3a6e270e5c519de 2024-11-20T13:24:40,638 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/9731861c6fd949eaa03adf04eab49353 is 50, key is test_row_0/C:col10/1732109079485/Put/seqid=0 2024-11-20T13:24:40,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742075_1251 (size=12001) 2024-11-20T13:24:40,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:40,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109140900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:40,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:40,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109140904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:40,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:40,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109140904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:40,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:40,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109140931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:41,089 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/9731861c6fd949eaa03adf04eab49353 2024-11-20T13:24:41,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/688963f8620349b9807cba11e35dc323 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/688963f8620349b9807cba11e35dc323 2024-11-20T13:24:41,101 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/688963f8620349b9807cba11e35dc323, entries=200, sequenceid=49, filesize=14.0 K 2024-11-20T13:24:41,103 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/1f8f692d57cd475bb3a6e270e5c519de as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/1f8f692d57cd475bb3a6e270e5c519de 2024-11-20T13:24:41,109 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/1f8f692d57cd475bb3a6e270e5c519de, entries=150, sequenceid=49, filesize=11.7 K 2024-11-20T13:24:41,110 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/9731861c6fd949eaa03adf04eab49353 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/9731861c6fd949eaa03adf04eab49353 2024-11-20T13:24:41,116 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/9731861c6fd949eaa03adf04eab49353, entries=150, sequenceid=49, filesize=11.7 K 2024-11-20T13:24:41,120 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7b01f50ebe5529b12faadacb91472f69 in 1508ms, sequenceid=49, compaction requested=true 2024-11-20T13:24:41,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:41,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:24:41,121 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:41,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:41,121 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:41,123 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:41,123 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:41,123 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/B is initiating minor compaction (all files) 2024-11-20T13:24:41,123 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/A is initiating minor compaction (all files) 2024-11-20T13:24:41,123 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/B in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:41,123 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/A in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
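The compaction-selection entries above also show the arithmetic: the three B-family flush outputs of 12,001 bytes each sum to the 36,003 bytes the policy reports, and the 38,343-byte A-family selection is consistent with two 12,001-byte files plus the new 14,341-byte file, each set accepted "with 1 in ratio". Below is a simplified, illustrative version of that ratio test; it is not the actual ExploringCompactionPolicy code, and the 1.2 value for hbase.hstore.compaction.ratio is assumed rather than read from this test's configuration.

public final class RatioCheckSketch {

  // Core idea of the "in ratio" check: every file in a candidate set must be
  // no larger than ratio * (combined size of the other files in the set).
  static boolean inRatio(long[] fileSizes, double ratio) {
    long total = 0;
    for (long size : fileSizes) {
      total += size;
    }
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Sizes taken from the log: three B-family flush files of 12,001 bytes (total 36,003).
    long[] bFiles = {12001L, 12001L, 12001L};
    // Assumed default compaction ratio of 1.2.
    System.out.println(inRatio(bFiles, 1.2));   // prints true: the set is "in ratio"
  }
}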
2024-11-20T13:24:41,123 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/273bc95931874a0faf53d985d7a2d4ad, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/46c903d43e4044278eee6147f126548f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/1f8f692d57cd475bb3a6e270e5c519de] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=35.2 K 2024-11-20T13:24:41,123 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/4cc9ebf4b16343408ab53f947961db1a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/ee9a09de85ce4dd896cfbd1eb806dcfd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/688963f8620349b9807cba11e35dc323] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=37.4 K 2024-11-20T13:24:41,125 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 273bc95931874a0faf53d985d7a2d4ad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732109077266 2024-11-20T13:24:41,125 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4cc9ebf4b16343408ab53f947961db1a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732109077266 2024-11-20T13:24:41,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:24:41,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:41,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:24:41,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:41,126 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 46c903d43e4044278eee6147f126548f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732109077316 2024-11-20T13:24:41,126 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee9a09de85ce4dd896cfbd1eb806dcfd, keycount=150, 
bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732109077316 2024-11-20T13:24:41,127 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f8f692d57cd475bb3a6e270e5c519de, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732109079485 2024-11-20T13:24:41,127 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 688963f8620349b9807cba11e35dc323, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732109079485 2024-11-20T13:24:41,145 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#A#compaction#207 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:41,146 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/815eddaa082d4b0db9eac83dc77e70ca is 50, key is test_row_0/A:col10/1732109079485/Put/seqid=0 2024-11-20T13:24:41,150 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#B#compaction#208 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:41,151 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/41c51fd4ab8941fe9ea2662b16129fa8 is 50, key is test_row_0/B:col10/1732109079485/Put/seqid=0 2024-11-20T13:24:41,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742076_1252 (size=12104) 2024-11-20T13:24:41,213 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/815eddaa082d4b0db9eac83dc77e70ca as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/815eddaa082d4b0db9eac83dc77e70ca 2024-11-20T13:24:41,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742077_1253 (size=12104) 2024-11-20T13:24:41,243 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/A of 7b01f50ebe5529b12faadacb91472f69 into 815eddaa082d4b0db9eac83dc77e70ca(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:41,243 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:41,243 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/A, priority=13, startTime=1732109081120; duration=0sec 2024-11-20T13:24:41,243 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:41,243 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:A 2024-11-20T13:24:41,243 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:41,252 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:41,252 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/C is initiating minor compaction (all files) 2024-11-20T13:24:41,252 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/C in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:41,252 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/bc3dc8dee7cd4faebd803b5d1527423d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/4bd8652be3eb49efa4b9dcbd59282026, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/9731861c6fd949eaa03adf04eab49353] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=35.2 K 2024-11-20T13:24:41,256 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc3dc8dee7cd4faebd803b5d1527423d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732109077266 2024-11-20T13:24:41,256 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4bd8652be3eb49efa4b9dcbd59282026, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732109077316 2024-11-20T13:24:41,257 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9731861c6fd949eaa03adf04eab49353, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732109079485 2024-11-20T13:24:41,283 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#C#compaction#209 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:41,284 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/c4ab8547de8c4f949ae6f7011d9cfe94 is 50, key is test_row_0/C:col10/1732109079485/Put/seqid=0 2024-11-20T13:24:41,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T13:24:41,330 INFO [Thread-1131 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-20T13:24:41,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742078_1254 (size=12104) 2024-11-20T13:24:41,338 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:24:41,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-20T13:24:41,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T13:24:41,346 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:24:41,351 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:24:41,351 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:24:41,364 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/c4ab8547de8c4f949ae6f7011d9cfe94 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c4ab8547de8c4f949ae6f7011d9cfe94 2024-11-20T13:24:41,397 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/C of 7b01f50ebe5529b12faadacb91472f69 into c4ab8547de8c4f949ae6f7011d9cfe94(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:41,397 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:41,398 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/C, priority=13, startTime=1732109081126; duration=0sec 2024-11-20T13:24:41,398 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:41,398 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:C 2024-11-20T13:24:41,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T13:24:41,508 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:41,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T13:24:41,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:41,509 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T13:24:41,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:41,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:41,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:41,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:41,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:41,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:41,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/684202332f6e4b949e0f2a3de52b74ec is 50, key is test_row_0/A:col10/1732109079718/Put/seqid=0 2024-11-20T13:24:41,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:41,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:41,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742079_1255 (size=12001) 2024-11-20T13:24:41,585 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/684202332f6e4b949e0f2a3de52b74ec 2024-11-20T13:24:41,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:41,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109141592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:41,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T13:24:41,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/7927fececc03404284005093da614552 is 50, key is test_row_0/B:col10/1732109079718/Put/seqid=0 2024-11-20T13:24:41,658 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/41c51fd4ab8941fe9ea2662b16129fa8 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/41c51fd4ab8941fe9ea2662b16129fa8 2024-11-20T13:24:41,666 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/B of 7b01f50ebe5529b12faadacb91472f69 into 41c51fd4ab8941fe9ea2662b16129fa8(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:41,666 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:41,666 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/B, priority=13, startTime=1732109081121; duration=0sec 2024-11-20T13:24:41,666 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:41,666 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:B 2024-11-20T13:24:41,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742080_1256 (size=12001) 2024-11-20T13:24:41,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:41,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109141701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:41,706 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/7927fececc03404284005093da614552 2024-11-20T13:24:41,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/d4f107a7b92446f9baac10acd25958ff is 50, key is test_row_0/C:col10/1732109079718/Put/seqid=0 2024-11-20T13:24:41,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742081_1257 (size=12001) 2024-11-20T13:24:41,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:41,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109141916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:41,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:41,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109141916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:41,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:41,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109141920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:41,936 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:41,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109141932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:41,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T13:24:41,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:41,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109141956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:42,183 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/d4f107a7b92446f9baac10acd25958ff 2024-11-20T13:24:42,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/684202332f6e4b949e0f2a3de52b74ec as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/684202332f6e4b949e0f2a3de52b74ec 2024-11-20T13:24:42,199 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/684202332f6e4b949e0f2a3de52b74ec, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T13:24:42,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/7927fececc03404284005093da614552 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/7927fececc03404284005093da614552 2024-11-20T13:24:42,208 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/7927fececc03404284005093da614552, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T13:24:42,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/d4f107a7b92446f9baac10acd25958ff as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/d4f107a7b92446f9baac10acd25958ff 2024-11-20T13:24:42,232 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:42,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109142228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:42,244 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/d4f107a7b92446f9baac10acd25958ff, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T13:24:42,245 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 7b01f50ebe5529b12faadacb91472f69 in 736ms, sequenceid=76, compaction requested=false 2024-11-20T13:24:42,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:42,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
2024-11-20T13:24:42,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-20T13:24:42,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-20T13:24:42,252 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-20T13:24:42,252 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 899 msec 2024-11-20T13:24:42,254 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 914 msec 2024-11-20T13:24:42,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T13:24:42,450 INFO [Thread-1131 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-20T13:24:42,452 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:24:42,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-20T13:24:42,454 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:24:42,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T13:24:42,456 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:24:42,456 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:24:42,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T13:24:42,612 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:42,613 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T13:24:42,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
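[annotation] The FLUSH operations the master keeps storing as FlushTableProcedure (pid=73, 75, 77 above) originate from client-side flush requests; the "Operation: FLUSH ... completed" lines are the client observing the procedure finish. A minimal sketch of issuing such a flush from application code, assuming a standard client configuration (only the table name comes from the log):

```java
// Minimal sketch (assumed): trigger a table flush like the ones recorded above.
// Admin#flush waits for the corresponding flush procedure on the master.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));  // flush all stores of the table
    }
  }
}
```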
2024-11-20T13:24:42,613 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T13:24:42,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:42,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:42,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:42,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:42,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:42,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:42,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/2e70f96922e844169d09a5200acc22ce is 50, key is test_row_0/A:col10/1732109081536/Put/seqid=0 2024-11-20T13:24:42,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742082_1258 (size=12001) 2024-11-20T13:24:42,665 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/2e70f96922e844169d09a5200acc22ce 2024-11-20T13:24:42,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/7a6bd02d17a84028a9ccada74e4386a0 is 50, key is test_row_0/B:col10/1732109081536/Put/seqid=0 2024-11-20T13:24:42,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742083_1259 (size=12001) 2024-11-20T13:24:42,747 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
as already flushing 2024-11-20T13:24:42,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:42,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T13:24:42,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:42,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109142974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:43,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T13:24:43,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:43,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109143089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:43,129 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/7a6bd02d17a84028a9ccada74e4386a0 2024-11-20T13:24:43,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/e503073b18904699a3f3c75bee4a0df3 is 50, key is test_row_0/C:col10/1732109081536/Put/seqid=0 2024-11-20T13:24:43,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742084_1260 (size=12001) 2024-11-20T13:24:43,211 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/e503073b18904699a3f3c75bee4a0df3 2024-11-20T13:24:43,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/2e70f96922e844169d09a5200acc22ce as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/2e70f96922e844169d09a5200acc22ce 2024-11-20T13:24:43,228 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/2e70f96922e844169d09a5200acc22ce, entries=150, sequenceid=88, filesize=11.7 K 2024-11-20T13:24:43,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/7a6bd02d17a84028a9ccada74e4386a0 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/7a6bd02d17a84028a9ccada74e4386a0 2024-11-20T13:24:43,237 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/7a6bd02d17a84028a9ccada74e4386a0, entries=150, sequenceid=88, filesize=11.7 K 2024-11-20T13:24:43,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/e503073b18904699a3f3c75bee4a0df3 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/e503073b18904699a3f3c75bee4a0df3 2024-11-20T13:24:43,245 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/e503073b18904699a3f3c75bee4a0df3, entries=150, sequenceid=88, filesize=11.7 K 2024-11-20T13:24:43,247 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7b01f50ebe5529b12faadacb91472f69 in 634ms, sequenceid=88, compaction requested=true 2024-11-20T13:24:43,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:43,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
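[annotation] The repeated RegionTooBusyException warnings mean the region's memstore has reached its blocking threshold, which is the per-region flush size multiplied by hbase.hregion.memstore.block.multiplier; the 512 K limit in these messages suggests the test runs with a deliberately tiny threshold to exercise this path. A minimal sketch of the two settings involved, with illustrative values rather than the ones this test uses:

```java
// Hedged sketch: the two properties behind the "Over memstore limit=512.0 K"
// rejections above. Values here are illustrative, not this test's configuration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimits {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024); // per-region flush trigger
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);             // writes block at flush.size * multiplier
    return conf;
  }
}
```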
2024-11-20T13:24:43,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-20T13:24:43,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-20T13:24:43,250 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-20T13:24:43,250 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 793 msec 2024-11-20T13:24:43,252 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 799 msec 2024-11-20T13:24:43,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:43,304 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T13:24:43,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:43,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:43,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:43,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:43,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:43,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:43,323 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/d3cb5e937f8e49028e80caa289e16012 is 50, key is test_row_0/A:col10/1732109082970/Put/seqid=0 2024-11-20T13:24:43,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742085_1261 (size=14341) 2024-11-20T13:24:43,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:43,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109143393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:43,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:43,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109143503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:43,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T13:24:43,568 INFO [Thread-1131 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-20T13:24:43,581 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:24:43,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-20T13:24:43,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T13:24:43,589 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:24:43,597 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:24:43,597 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:24:43,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T13:24:43,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:43,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109143711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:43,763 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:43,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T13:24:43,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:43,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:43,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:43,764 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:43,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:43,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:43,767 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/d3cb5e937f8e49028e80caa289e16012 2024-11-20T13:24:43,782 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/5030247d319f4f609668437636b31284 is 50, key is test_row_0/B:col10/1732109082970/Put/seqid=0 2024-11-20T13:24:43,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742086_1262 (size=12001) 2024-11-20T13:24:43,827 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/5030247d319f4f609668437636b31284 2024-11-20T13:24:43,840 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/46531333cf5041a59678c218b5ea0f18 is 50, key is test_row_0/C:col10/1732109082970/Put/seqid=0 2024-11-20T13:24:43,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T13:24:43,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742087_1263 (size=12001) 2024-11-20T13:24:43,912 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/46531333cf5041a59678c218b5ea0f18 2024-11-20T13:24:43,918 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:43,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T13:24:43,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:43,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
as already flushing 2024-11-20T13:24:43,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:43,919 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:43,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:43,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/d3cb5e937f8e49028e80caa289e16012 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d3cb5e937f8e49028e80caa289e16012 2024-11-20T13:24:43,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:43,929 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d3cb5e937f8e49028e80caa289e16012, entries=200, sequenceid=114, filesize=14.0 K 2024-11-20T13:24:43,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/5030247d319f4f609668437636b31284 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/5030247d319f4f609668437636b31284 2024-11-20T13:24:43,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:43,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109143929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:43,942 DEBUG [Thread-1125 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4222 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:24:43,943 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/5030247d319f4f609668437636b31284, entries=150, sequenceid=114, filesize=11.7 K 2024-11-20T13:24:43,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/46531333cf5041a59678c218b5ea0f18 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/46531333cf5041a59678c218b5ea0f18 2024-11-20T13:24:43,954 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:43,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109143944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:43,955 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:43,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/46531333cf5041a59678c218b5ea0f18, entries=150, sequenceid=114, filesize=11.7 K 2024-11-20T13:24:43,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109143952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:43,956 DEBUG [Thread-1127 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4238 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:24:43,956 DEBUG [Thread-1129 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4238 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at 
org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:24:43,957 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 7b01f50ebe5529b12faadacb91472f69 in 653ms, sequenceid=114, compaction requested=true 2024-11-20T13:24:43,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:43,958 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:24:43,960 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50447 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:24:43,961 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/A is initiating minor compaction (all files) 2024-11-20T13:24:43,961 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/A in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
2024-11-20T13:24:43,961 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/815eddaa082d4b0db9eac83dc77e70ca, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/684202332f6e4b949e0f2a3de52b74ec, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/2e70f96922e844169d09a5200acc22ce, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d3cb5e937f8e49028e80caa289e16012] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=49.3 K 2024-11-20T13:24:43,962 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 815eddaa082d4b0db9eac83dc77e70ca, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732109079485 2024-11-20T13:24:43,962 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 684202332f6e4b949e0f2a3de52b74ec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732109079716 2024-11-20T13:24:43,963 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e70f96922e844169d09a5200acc22ce, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732109081536 2024-11-20T13:24:43,963 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3cb5e937f8e49028e80caa289e16012, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732109082944 2024-11-20T13:24:43,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:43,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:24:43,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:43,976 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:24:43,978 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:24:43,979 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/B is initiating minor compaction (all files) 2024-11-20T13:24:43,979 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/B in 
TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:43,979 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/41c51fd4ab8941fe9ea2662b16129fa8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/7927fececc03404284005093da614552, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/7a6bd02d17a84028a9ccada74e4386a0, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/5030247d319f4f609668437636b31284] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=47.0 K 2024-11-20T13:24:43,981 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 41c51fd4ab8941fe9ea2662b16129fa8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732109079485 2024-11-20T13:24:43,981 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 7927fececc03404284005093da614552, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732109079716 2024-11-20T13:24:43,982 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 7a6bd02d17a84028a9ccada74e4386a0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732109081536 2024-11-20T13:24:43,982 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 5030247d319f4f609668437636b31284, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732109082960 2024-11-20T13:24:43,991 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#A#compaction#219 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:43,992 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/4e76ff5db0a743bd911bcc10c5dd5ecb is 50, key is test_row_0/A:col10/1732109082970/Put/seqid=0 2024-11-20T13:24:44,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:24:44,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:44,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:24:44,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:44,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T13:24:44,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:44,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:44,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:44,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:44,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:44,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:44,010 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#B#compaction#220 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:44,011 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/b6d335ac8d934633ba9144a12911397d is 50, key is test_row_0/B:col10/1732109082970/Put/seqid=0 2024-11-20T13:24:44,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742088_1264 (size=12241) 2024-11-20T13:24:44,037 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/4e76ff5db0a743bd911bcc10c5dd5ecb as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/4e76ff5db0a743bd911bcc10c5dd5ecb 2024-11-20T13:24:44,041 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/1b5013f3a33a4bf4abbeb4f0d5f3d840 is 50, key is test_row_0/A:col10/1732109083996/Put/seqid=0 2024-11-20T13:24:44,051 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/A of 7b01f50ebe5529b12faadacb91472f69 into 4e76ff5db0a743bd911bcc10c5dd5ecb(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
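The entries above show ExploringCompactionPolicy picking all four eligible HFiles for store A of region 7b01f50ebe5529b12faadacb91472f69 (and the same selection for B), then compacting them into a single ~12.0 K file. The core test behind a selection like this is the "files in ratio" rule: no file in the candidate set may be larger than the combined size of the other files times the compaction ratio (hbase.hstore.compaction.ratio, 1.2 by default). What follows is a minimal sketch of that rule, not HBase's actual implementation; the class name and the sample sizes (taken roughly from the file sizes logged above) are illustrative only.

import java.util.List;

public final class RatioCheckSketch {
    // Simplified version of the "files in ratio" check applied to a candidate
    // selection: every file must be no larger than the sum of the remaining
    // files multiplied by the configured ratio.
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false; // one file dominates the selection; reject it
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes roughly matching the four HFiles in the log (~11.7 K to 14.0 K each).
        List<Long> sizes = List.of(12_100L, 12_000L, 12_000L, 14_300L);
        System.out.println(filesInRatio(sizes, 1.2)); // prints true, so all 4 stay eligible
    }
}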
2024-11-20T13:24:44,051 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:44,052 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/A, priority=12, startTime=1732109083958; duration=0sec 2024-11-20T13:24:44,052 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:44,052 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:A 2024-11-20T13:24:44,052 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:24:44,054 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:24:44,054 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/C is initiating minor compaction (all files) 2024-11-20T13:24:44,054 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/C in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:44,054 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c4ab8547de8c4f949ae6f7011d9cfe94, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/d4f107a7b92446f9baac10acd25958ff, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/e503073b18904699a3f3c75bee4a0df3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/46531333cf5041a59678c218b5ea0f18] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=47.0 K 2024-11-20T13:24:44,055 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4ab8547de8c4f949ae6f7011d9cfe94, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732109079485 2024-11-20T13:24:44,055 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting d4f107a7b92446f9baac10acd25958ff, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732109079716 2024-11-20T13:24:44,056 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting e503073b18904699a3f3c75bee4a0df3, keycount=150, bloomtype=ROW, size=11.7 K, 
encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732109081536 2024-11-20T13:24:44,056 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46531333cf5041a59678c218b5ea0f18, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732109082960 2024-11-20T13:24:44,073 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:44,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T13:24:44,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:44,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:44,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:44,081 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:44,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:44,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
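The pid=80 entries above show a master-initiated flush procedure failing on the region server: FlushRegionCallable finds the region "already flushing" (the MemStoreFlusher is mid-flush), throws "Unable to complete flush", and the master records the remote procedure failure before re-dispatching it, as the later pid=80 attempts in this log show. A flush like this is typically requested through the Admin API; the sketch below shows that public-API path under the assumption that a table-level flush is what fans out to this region (the class name is illustrative, and the test harness may trigger the flush differently).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushRequestSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; the master
            // dispatches a FlushRegionCallable to each hosting region server,
            // which is the remote procedure seen failing and retrying above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}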
2024-11-20T13:24:44,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742089_1265 (size=12241) 2024-11-20T13:24:44,109 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/b6d335ac8d934633ba9144a12911397d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/b6d335ac8d934633ba9144a12911397d 2024-11-20T13:24:44,116 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/B of 7b01f50ebe5529b12faadacb91472f69 into b6d335ac8d934633ba9144a12911397d(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:24:44,116 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:44,117 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/B, priority=12, startTime=1732109083976; duration=0sec 2024-11-20T13:24:44,117 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:44,117 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:B 2024-11-20T13:24:44,123 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#C#compaction#222 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:44,124 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/37b0b959e9004daebc064bea51e51749 is 50, key is test_row_0/C:col10/1732109082970/Put/seqid=0 2024-11-20T13:24:44,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742090_1266 (size=12001) 2024-11-20T13:24:44,149 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/1b5013f3a33a4bf4abbeb4f0d5f3d840 2024-11-20T13:24:44,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:44,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109144175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:44,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:44,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109144183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:44,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742091_1267 (size=12241) 2024-11-20T13:24:44,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T13:24:44,203 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/cba8491641df4c6ba24c44cdb4c70fb2 is 50, key is test_row_0/B:col10/1732109083996/Put/seqid=0 2024-11-20T13:24:44,244 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/37b0b959e9004daebc064bea51e51749 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/37b0b959e9004daebc064bea51e51749 2024-11-20T13:24:44,245 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:44,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T13:24:44,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742092_1268 (size=12001) 2024-11-20T13:24:44,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:44,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:44,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
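The Mutate RPCs above are rejected with RegionTooBusyException because the region's memstore has grown past its blocking size, which is the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; the unusually small 512.0 K limit suggests the test deliberately configures a very small hbase.hregion.memstore.flush.size to force this contention. The sketch below shows one way a writer could back off when that happens. The standard HBase client already retries RegionTooBusyException internally, so an explicit loop like this (class name, row and value contents, and retry parameters are all illustrative) mainly matters when client retries are tuned down or exhausted.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BackoffPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // Memstore above its blocking size; give the flush time to finish.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}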
2024-11-20T13:24:44,249 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:44,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:44,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:44,258 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/C of 7b01f50ebe5529b12faadacb91472f69 into 37b0b959e9004daebc064bea51e51749(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:24:44,258 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:44,258 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/C, priority=12, startTime=1732109084004; duration=0sec 2024-11-20T13:24:44,258 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:44,258 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:C 2024-11-20T13:24:44,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:44,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109144285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:44,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:44,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109144290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:44,403 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:44,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T13:24:44,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:44,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:44,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:44,409 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:44,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:44,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:44,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:44,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109144488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:44,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:44,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109144505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:44,561 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:44,564 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T13:24:44,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:44,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:44,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:44,564 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:44,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:44,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:44,652 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/cba8491641df4c6ba24c44cdb4c70fb2 2024-11-20T13:24:44,695 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/7253720da36a41649c8651f8b6a0e481 is 50, key is test_row_0/C:col10/1732109083996/Put/seqid=0 2024-11-20T13:24:44,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T13:24:44,717 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:44,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T13:24:44,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:44,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:44,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:44,724 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:44,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:44,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:44,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742093_1269 (size=12001) 2024-11-20T13:24:44,737 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/7253720da36a41649c8651f8b6a0e481 2024-11-20T13:24:44,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/1b5013f3a33a4bf4abbeb4f0d5f3d840 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/1b5013f3a33a4bf4abbeb4f0d5f3d840 2024-11-20T13:24:44,768 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/1b5013f3a33a4bf4abbeb4f0d5f3d840, entries=150, sequenceid=127, filesize=11.7 K 2024-11-20T13:24:44,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/cba8491641df4c6ba24c44cdb4c70fb2 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/cba8491641df4c6ba24c44cdb4c70fb2 2024-11-20T13:24:44,782 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/cba8491641df4c6ba24c44cdb4c70fb2, entries=150, sequenceid=127, filesize=11.7 K 2024-11-20T13:24:44,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/7253720da36a41649c8651f8b6a0e481 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/7253720da36a41649c8651f8b6a0e481 2024-11-20T13:24:44,799 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/7253720da36a41649c8651f8b6a0e481, entries=150, sequenceid=127, filesize=11.7 K 2024-11-20T13:24:44,802 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 7b01f50ebe5529b12faadacb91472f69 in 797ms, sequenceid=127, compaction requested=false 2024-11-20T13:24:44,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:44,803 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T13:24:44,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:44,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:44,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:44,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:44,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:44,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:44,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:44,826 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/d1bb7863d67f4e6c91b3e4e9c4eb354f is 50, key is test_row_0/A:col10/1732109084137/Put/seqid=0 2024-11-20T13:24:44,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:44,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109144849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:44,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:44,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109144850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:44,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742094_1270 (size=14541) 2024-11-20T13:24:44,882 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:44,883 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T13:24:44,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:44,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:44,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:44,884 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:44,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:44,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:44,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:44,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109144961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:44,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:44,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109144980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:45,036 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:45,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T13:24:45,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:45,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:45,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:45,037 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:45,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:45,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:45,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:45,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109145168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:45,189 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:45,192 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T13:24:45,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:45,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:45,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
2024-11-20T13:24:45,192 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:45,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:45,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:45,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:45,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109145194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:45,275 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/d1bb7863d67f4e6c91b3e4e9c4eb354f 2024-11-20T13:24:45,318 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/d0470cf051944badb1e38f55bbf3a1e8 is 50, key is test_row_0/B:col10/1732109084137/Put/seqid=0 2024-11-20T13:24:45,348 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:45,351 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T13:24:45,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:45,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:45,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:45,352 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:45,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:45,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:45,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742095_1271 (size=12151) 2024-11-20T13:24:45,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:45,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109145475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:45,509 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:45,512 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T13:24:45,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:45,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:45,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:45,513 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:45,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:45,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:45,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:45,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109145512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:45,667 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:45,669 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T13:24:45,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:45,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:45,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
2024-11-20T13:24:45,670 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:45,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:45,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:45,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T13:24:45,751 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T13:24:45,751 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-20T13:24:45,797 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/d0470cf051944badb1e38f55bbf3a1e8 2024-11-20T13:24:45,824 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:45,824 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T13:24:45,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:45,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
as already flushing 2024-11-20T13:24:45,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:45,825 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:45,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:45,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:45,857 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/15488a48131c49529ba9b5e52bdf0d53 is 50, key is test_row_0/C:col10/1732109084137/Put/seqid=0 2024-11-20T13:24:45,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742096_1272 (size=12151) 2024-11-20T13:24:45,898 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/15488a48131c49529ba9b5e52bdf0d53 2024-11-20T13:24:45,919 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/d1bb7863d67f4e6c91b3e4e9c4eb354f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d1bb7863d67f4e6c91b3e4e9c4eb354f 2024-11-20T13:24:45,933 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d1bb7863d67f4e6c91b3e4e9c4eb354f, entries=200, sequenceid=154, filesize=14.2 K 2024-11-20T13:24:45,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/d0470cf051944badb1e38f55bbf3a1e8 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/d0470cf051944badb1e38f55bbf3a1e8 2024-11-20T13:24:45,954 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/d0470cf051944badb1e38f55bbf3a1e8, entries=150, sequenceid=154, filesize=11.9 K 2024-11-20T13:24:45,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/15488a48131c49529ba9b5e52bdf0d53 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/15488a48131c49529ba9b5e52bdf0d53 2024-11-20T13:24:45,961 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/15488a48131c49529ba9b5e52bdf0d53, entries=150, sequenceid=154, filesize=11.9 K 2024-11-20T13:24:45,963 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 7b01f50ebe5529b12faadacb91472f69 in 1160ms, sequenceid=154, compaction requested=true 2024-11-20T13:24:45,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:45,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:24:45,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:45,963 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:45,963 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:45,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:24:45,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:45,965 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38783 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:45,965 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 
7b01f50ebe5529b12faadacb91472f69/A is initiating minor compaction (all files) 2024-11-20T13:24:45,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:24:45,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:45,965 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/A in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:45,965 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/4e76ff5db0a743bd911bcc10c5dd5ecb, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/1b5013f3a33a4bf4abbeb4f0d5f3d840, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d1bb7863d67f4e6c91b3e4e9c4eb354f] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=37.9 K 2024-11-20T13:24:45,966 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:45,966 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/B is initiating minor compaction (all files) 2024-11-20T13:24:45,966 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/B in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
2024-11-20T13:24:45,966 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/b6d335ac8d934633ba9144a12911397d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/cba8491641df4c6ba24c44cdb4c70fb2, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/d0470cf051944badb1e38f55bbf3a1e8] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=35.5 K 2024-11-20T13:24:45,967 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting b6d335ac8d934633ba9144a12911397d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732109082960 2024-11-20T13:24:45,967 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e76ff5db0a743bd911bcc10c5dd5ecb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732109082960 2024-11-20T13:24:45,967 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting cba8491641df4c6ba24c44cdb4c70fb2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732109083969 2024-11-20T13:24:45,968 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b5013f3a33a4bf4abbeb4f0d5f3d840, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732109083969 2024-11-20T13:24:45,968 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting d0470cf051944badb1e38f55bbf3a1e8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732109084137 2024-11-20T13:24:45,968 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1bb7863d67f4e6c91b3e4e9c4eb354f, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732109084137 2024-11-20T13:24:45,980 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:45,984 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T13:24:45,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
2024-11-20T13:24:45,984 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T13:24:45,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:45,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:45,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:45,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:45,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:45,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:45,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:45,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:46,010 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#B#compaction#228 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:46,011 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/e15d86e4e96c4a9b89f375fe13e978e4 is 50, key is test_row_0/B:col10/1732109084137/Put/seqid=0 2024-11-20T13:24:46,038 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#A#compaction#229 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:46,039 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/d31b26256f8b4a57a09be4f373c12d09 is 50, key is test_row_0/A:col10/1732109084137/Put/seqid=0 2024-11-20T13:24:46,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/0dda916686864ceb8d2a44554fc7870d is 50, key is test_row_0/A:col10/1732109084849/Put/seqid=0 2024-11-20T13:24:46,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:46,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109146120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:46,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:46,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109146120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:46,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742097_1273 (size=12493) 2024-11-20T13:24:46,143 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/e15d86e4e96c4a9b89f375fe13e978e4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e15d86e4e96c4a9b89f375fe13e978e4 2024-11-20T13:24:46,150 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/B of 7b01f50ebe5529b12faadacb91472f69 into e15d86e4e96c4a9b89f375fe13e978e4(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:46,150 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:46,150 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/B, priority=13, startTime=1732109085963; duration=0sec 2024-11-20T13:24:46,150 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:46,150 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:B 2024-11-20T13:24:46,150 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:46,153 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:46,153 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/C is initiating minor compaction (all files) 2024-11-20T13:24:46,153 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/C in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:46,153 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/37b0b959e9004daebc064bea51e51749, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/7253720da36a41649c8651f8b6a0e481, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/15488a48131c49529ba9b5e52bdf0d53] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=35.5 K 2024-11-20T13:24:46,154 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 37b0b959e9004daebc064bea51e51749, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732109082960 2024-11-20T13:24:46,154 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 7253720da36a41649c8651f8b6a0e481, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732109083969 2024-11-20T13:24:46,155 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 15488a48131c49529ba9b5e52bdf0d53, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732109084137 2024-11-20T13:24:46,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 
is added to blk_1073742098_1274 (size=12493) 2024-11-20T13:24:46,165 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/d31b26256f8b4a57a09be4f373c12d09 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d31b26256f8b4a57a09be4f373c12d09 2024-11-20T13:24:46,172 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/A of 7b01f50ebe5529b12faadacb91472f69 into d31b26256f8b4a57a09be4f373c12d09(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:24:46,172 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:46,172 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/A, priority=13, startTime=1732109085963; duration=0sec 2024-11-20T13:24:46,173 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:46,173 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:A 2024-11-20T13:24:46,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742099_1275 (size=9757) 2024-11-20T13:24:46,185 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/0dda916686864ceb8d2a44554fc7870d 2024-11-20T13:24:46,188 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#C#compaction#231 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:46,189 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/7166be2fcb7a404b9000095f9a14b331 is 50, key is test_row_0/C:col10/1732109084137/Put/seqid=0 2024-11-20T13:24:46,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:46,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109146231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:46,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742100_1276 (size=12493) 2024-11-20T13:24:46,246 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/7166be2fcb7a404b9000095f9a14b331 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/7166be2fcb7a404b9000095f9a14b331 2024-11-20T13:24:46,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/37b9f4feb47a4fd5a68bf3e4cd7d6c9b is 50, key is test_row_0/B:col10/1732109084849/Put/seqid=0 2024-11-20T13:24:46,255 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/C of 7b01f50ebe5529b12faadacb91472f69 into 7166be2fcb7a404b9000095f9a14b331(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:46,255 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:46,255 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/C, priority=13, startTime=1732109085965; duration=0sec 2024-11-20T13:24:46,255 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:46,255 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:C 2024-11-20T13:24:46,260 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:46,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109146255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:46,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742101_1277 (size=9757) 2024-11-20T13:24:46,285 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/37b9f4feb47a4fd5a68bf3e4cd7d6c9b 2024-11-20T13:24:46,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/c0bd2ee37b2a434989f1f4b19b116ad5 is 50, key is test_row_0/C:col10/1732109084849/Put/seqid=0 2024-11-20T13:24:46,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742102_1278 (size=9757) 2024-11-20T13:24:46,353 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/c0bd2ee37b2a434989f1f4b19b116ad5 2024-11-20T13:24:46,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/0dda916686864ceb8d2a44554fc7870d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/0dda916686864ceb8d2a44554fc7870d 2024-11-20T13:24:46,364 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/0dda916686864ceb8d2a44554fc7870d, entries=100, sequenceid=166, filesize=9.5 K 2024-11-20T13:24:46,366 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/37b9f4feb47a4fd5a68bf3e4cd7d6c9b as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/37b9f4feb47a4fd5a68bf3e4cd7d6c9b 2024-11-20T13:24:46,371 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/37b9f4feb47a4fd5a68bf3e4cd7d6c9b, entries=100, sequenceid=166, filesize=9.5 K 2024-11-20T13:24:46,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/c0bd2ee37b2a434989f1f4b19b116ad5 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c0bd2ee37b2a434989f1f4b19b116ad5 2024-11-20T13:24:46,379 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c0bd2ee37b2a434989f1f4b19b116ad5, entries=100, sequenceid=166, filesize=9.5 K 2024-11-20T13:24:46,381 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 7b01f50ebe5529b12faadacb91472f69 in 397ms, sequenceid=166, compaction requested=false 2024-11-20T13:24:46,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:46,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
2024-11-20T13:24:46,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-20T13:24:46,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-20T13:24:46,401 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-20T13:24:46,401 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8020 sec 2024-11-20T13:24:46,403 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 2.8210 sec 2024-11-20T13:24:46,443 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T13:24:46,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:46,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:46,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:46,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:46,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:46,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:46,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:46,458 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/ab6669459f9a478297044d1d74464ec4 is 50, key is test_row_0/A:col10/1732109086103/Put/seqid=0 2024-11-20T13:24:46,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:46,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109146493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:46,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:46,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109146495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:46,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742103_1279 (size=14541) 2024-11-20T13:24:46,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:46,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109146601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:46,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:46,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109146605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:46,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:46,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109146808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:46,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:46,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109146813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:46,921 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/ab6669459f9a478297044d1d74464ec4 2024-11-20T13:24:46,978 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/e8bbd1b5a4da49a88bc9309b3366ca1a is 50, key is test_row_0/B:col10/1732109086103/Put/seqid=0 2024-11-20T13:24:47,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742104_1280 (size=12151) 2024-11-20T13:24:47,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:47,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109147117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:47,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:47,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109147119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:47,444 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/e8bbd1b5a4da49a88bc9309b3366ca1a 2024-11-20T13:24:47,520 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/f1ee4fe86ef14fe396e85ec65c7bb2c1 is 50, key is test_row_0/C:col10/1732109086103/Put/seqid=0 2024-11-20T13:24:47,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742105_1281 (size=12151) 2024-11-20T13:24:47,588 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/f1ee4fe86ef14fe396e85ec65c7bb2c1 2024-11-20T13:24:47,620 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/ab6669459f9a478297044d1d74464ec4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/ab6669459f9a478297044d1d74464ec4 2024-11-20T13:24:47,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:47,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109147626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:47,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:47,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109147626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:47,649 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/ab6669459f9a478297044d1d74464ec4, entries=200, sequenceid=194, filesize=14.2 K 2024-11-20T13:24:47,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/e8bbd1b5a4da49a88bc9309b3366ca1a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e8bbd1b5a4da49a88bc9309b3366ca1a 2024-11-20T13:24:47,688 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e8bbd1b5a4da49a88bc9309b3366ca1a, entries=150, sequenceid=194, filesize=11.9 K 2024-11-20T13:24:47,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/f1ee4fe86ef14fe396e85ec65c7bb2c1 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/f1ee4fe86ef14fe396e85ec65c7bb2c1 2024-11-20T13:24:47,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T13:24:47,701 INFO [Thread-1131 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-20T13:24:47,720 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:24:47,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-20T13:24:47,725 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/f1ee4fe86ef14fe396e85ec65c7bb2c1, entries=150, sequenceid=194, filesize=11.9 K 2024-11-20T13:24:47,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T13:24:47,726 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 7b01f50ebe5529b12faadacb91472f69 in 1282ms, sequenceid=194, compaction requested=true 2024-11-20T13:24:47,726 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:47,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:24:47,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:47,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:24:47,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T13:24:47,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:24:47,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T13:24:47,726 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:47,726 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:47,730 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:24:47,733 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:24:47,733 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:24:47,740 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36791 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:47,740 DEBUG 
[RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/A is initiating minor compaction (all files) 2024-11-20T13:24:47,740 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/A in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:47,741 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d31b26256f8b4a57a09be4f373c12d09, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/0dda916686864ceb8d2a44554fc7870d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/ab6669459f9a478297044d1d74464ec4] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=35.9 K 2024-11-20T13:24:47,741 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:47,741 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/C is initiating minor compaction (all files) 2024-11-20T13:24:47,741 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/C in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
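The SortedCompactionPolicy/ExploringCompactionPolicy entries above report 3 eligible store files being picked because they are "1 in ratio". The snippet below is a minimal standalone sketch of that ratio test, assuming the commonly cited default ratio of 1.2; it illustrates the selection criterion only and is not the actual HBase policy code, and the byte sizes are rough conversions of the 12.2 K / 9.5 K / 14.2 K files shown in the log.

import java.util.List;

// Simplified sketch of the "in ratio" check used by HBase's ratio-based
// compaction selection. Illustration only, not the ExploringCompactionPolicy
// source; the 1.2 ratio is an assumed default.
public class CompactionRatioSketch {

    // A candidate selection is "in ratio" when no single file dominates it:
    // each file must be no larger than ratio * (sum of the other files).
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = 0;
        for (long size : fileSizes) {
            total += size;
        }
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate byte sizes of the three A-store files from the log
        // (12.2 K, 9.5 K, 14.2 K).
        List<Long> sizes = List.of(12_490L, 9_730L, 14_540L);
        System.out.println("in ratio: " + filesInRatio(sizes, 1.2));
    }
}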
2024-11-20T13:24:47,741 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/7166be2fcb7a404b9000095f9a14b331, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c0bd2ee37b2a434989f1f4b19b116ad5, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/f1ee4fe86ef14fe396e85ec65c7bb2c1] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=33.6 K 2024-11-20T13:24:47,742 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 7166be2fcb7a404b9000095f9a14b331, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732109084137 2024-11-20T13:24:47,742 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting d31b26256f8b4a57a09be4f373c12d09, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732109084137 2024-11-20T13:24:47,743 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting c0bd2ee37b2a434989f1f4b19b116ad5, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732109084835 2024-11-20T13:24:47,743 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0dda916686864ceb8d2a44554fc7870d, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732109084835 2024-11-20T13:24:47,744 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting f1ee4fe86ef14fe396e85ec65c7bb2c1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732109086103 2024-11-20T13:24:47,744 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab6669459f9a478297044d1d74464ec4, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732109086103 2024-11-20T13:24:47,778 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#A#compaction#237 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:47,779 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/857cc02eb2ee4e4ab4c4af5f3d33476f is 50, key is test_row_0/A:col10/1732109086103/Put/seqid=0 2024-11-20T13:24:47,782 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#C#compaction#238 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:47,783 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/04bfbb12377744a8a7609cfdec0dd02b is 50, key is test_row_0/C:col10/1732109086103/Put/seqid=0 2024-11-20T13:24:47,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742106_1282 (size=12595) 2024-11-20T13:24:47,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T13:24:47,838 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/857cc02eb2ee4e4ab4c4af5f3d33476f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/857cc02eb2ee4e4ab4c4af5f3d33476f 2024-11-20T13:24:47,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742107_1283 (size=12595) 2024-11-20T13:24:47,852 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/A of 7b01f50ebe5529b12faadacb91472f69 into 857cc02eb2ee4e4ab4c4af5f3d33476f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
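The PressureAwareThroughputController lines report the compaction's average throughput against a 50.00 MB/second total limit, with zero sleeps needed. Below is a minimal sketch, assuming a fixed limit, of how such byte-rate throttling can work: the writer reports bytes produced and is slept whenever the running rate would exceed the cap. The real controller additionally scales the limit with flush/memstore pressure, which this sketch omits.

import java.util.concurrent.TimeUnit;

// Minimal fixed-limit throttling sketch in the spirit of the
// PressureAwareThroughputController entries above. Illustrative only.
public class ThroughputThrottleSketch {

    private final double maxBytesPerSecond;
    private final long startNanos = System.nanoTime();
    private long bytesSoFar = 0;

    ThroughputThrottleSketch(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    // Called after writing `bytes`; returns the milliseconds actually slept.
    synchronized long control(long bytes) throws InterruptedException {
        bytesSoFar += bytes;
        double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9;
        // Time the writes *should* have taken at the configured limit.
        double expectedSeconds = bytesSoFar / maxBytesPerSecond;
        long sleepMillis = (long) ((expectedSeconds - elapsedSeconds) * 1000);
        if (sleepMillis > 0) {
            TimeUnit.MILLISECONDS.sleep(sleepMillis);
            return sleepMillis;
        }
        return 0;
    }

    public static void main(String[] args) throws InterruptedException {
        // 50 MB/second, matching the "total limit" reported in the log.
        ThroughputThrottleSketch throttle =
            new ThroughputThrottleSketch(50.0 * 1024 * 1024);
        long slept = throttle.control(12_595); // ~12.3 K of compacted output
        System.out.println("slept " + slept + " ms");
    }
}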
2024-11-20T13:24:47,852 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:47,852 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/A, priority=13, startTime=1732109087726; duration=0sec 2024-11-20T13:24:47,852 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:47,852 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:A 2024-11-20T13:24:47,852 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:47,855 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:47,856 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/B is initiating minor compaction (all files) 2024-11-20T13:24:47,856 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/B in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:47,856 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e15d86e4e96c4a9b89f375fe13e978e4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/37b9f4feb47a4fd5a68bf3e4cd7d6c9b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e8bbd1b5a4da49a88bc9309b3366ca1a] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=33.6 K 2024-11-20T13:24:47,857 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting e15d86e4e96c4a9b89f375fe13e978e4, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732109084137 2024-11-20T13:24:47,857 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 37b9f4feb47a4fd5a68bf3e4cd7d6c9b, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1732109084835 2024-11-20T13:24:47,858 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8bbd1b5a4da49a88bc9309b3366ca1a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732109086103 2024-11-20T13:24:47,870 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#B#compaction#239 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:47,871 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/1a3851dad2574233b06bf8efd0e0809f is 50, key is test_row_0/B:col10/1732109086103/Put/seqid=0 2024-11-20T13:24:47,885 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:47,888 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T13:24:47,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:47,888 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T13:24:47,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:47,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:47,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:47,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:47,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:47,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:47,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742108_1284 (size=12595) 2024-11-20T13:24:47,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/b9194e444eac46f8ba2b9db79dc53c8f is 50, key is test_row_0/A:col10/1732109086490/Put/seqid=0 2024-11-20T13:24:47,957 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/1a3851dad2574233b06bf8efd0e0809f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/1a3851dad2574233b06bf8efd0e0809f 2024-11-20T13:24:47,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742109_1285 (size=12151) 2024-11-20T13:24:47,968 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/b9194e444eac46f8ba2b9db79dc53c8f 2024-11-20T13:24:47,971 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/B of 7b01f50ebe5529b12faadacb91472f69 into 1a3851dad2574233b06bf8efd0e0809f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:24:47,971 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:47,971 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/B, priority=13, startTime=1732109087726; duration=0sec 2024-11-20T13:24:47,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:47,971 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:47,971 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:B 2024-11-20T13:24:47,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:47,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/a9404fb9f3304b119fe5ee71c6b21a6e is 50, key is test_row_0/B:col10/1732109086490/Put/seqid=0 2024-11-20T13:24:48,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T13:24:48,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742110_1286 (size=12151) 2024-11-20T13:24:48,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:48,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109148054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:48,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:48,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109148052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:48,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:48,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109148060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:48,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:48,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109148166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:48,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:48,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109148166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:48,181 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:48,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109148179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:48,264 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/04bfbb12377744a8a7609cfdec0dd02b as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/04bfbb12377744a8a7609cfdec0dd02b 2024-11-20T13:24:48,280 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/C of 7b01f50ebe5529b12faadacb91472f69 into 04bfbb12377744a8a7609cfdec0dd02b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
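The repeated RegionTooBusyException warnings mean puts are being rejected while the region sits over its memstore blocking limit (512.0 K here; in HBase that limit derives from hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, values this test presumably tunes down but which the log does not show). The sketch below is a hypothetical client-side view of the same situation, using the row, family, and qualifier names taken from the log; the stock HBase client already retries this exception internally, so the explicit backoff loop is purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical writer hitting the memstore blocking limit seen in the log:
// the put is rejected with RegionTooBusyException, so the client backs off
// and retries once the flush has had a chance to drain the memstore.
public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"),
                Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break; // accepted once the region is under its limit again
                } catch (RegionTooBusyException e) {
                    // Region is over its memstore limit; wait and retry.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}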
2024-11-20T13:24:48,280 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:48,280 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/C, priority=13, startTime=1732109087726; duration=0sec 2024-11-20T13:24:48,280 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:48,280 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:C 2024-11-20T13:24:48,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T13:24:48,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:48,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109148376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:48,384 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:48,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109148380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:48,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:48,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109148388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:48,441 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/a9404fb9f3304b119fe5ee71c6b21a6e 2024-11-20T13:24:48,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/e09bb48bba174575a98d0d89224b5ac2 is 50, key is test_row_0/C:col10/1732109086490/Put/seqid=0 2024-11-20T13:24:48,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742111_1287 (size=12151) 2024-11-20T13:24:48,530 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/e09bb48bba174575a98d0d89224b5ac2 2024-11-20T13:24:48,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/b9194e444eac46f8ba2b9db79dc53c8f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/b9194e444eac46f8ba2b9db79dc53c8f 2024-11-20T13:24:48,552 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/b9194e444eac46f8ba2b9db79dc53c8f, entries=150, sequenceid=207, filesize=11.9 K 2024-11-20T13:24:48,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/a9404fb9f3304b119fe5ee71c6b21a6e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/a9404fb9f3304b119fe5ee71c6b21a6e 2024-11-20T13:24:48,559 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/a9404fb9f3304b119fe5ee71c6b21a6e, entries=150, sequenceid=207, filesize=11.9 K 2024-11-20T13:24:48,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/e09bb48bba174575a98d0d89224b5ac2 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/e09bb48bba174575a98d0d89224b5ac2 2024-11-20T13:24:48,567 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/e09bb48bba174575a98d0d89224b5ac2, entries=150, sequenceid=207, filesize=11.9 K 2024-11-20T13:24:48,572 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 7b01f50ebe5529b12faadacb91472f69 in 684ms, sequenceid=207, compaction requested=false 2024-11-20T13:24:48,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:48,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
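The flush completing here is client-driven: the master logged "Client=jenkins//... flush TestAcidGuarantees", stored FlushTableProcedure pid=81, fanned out FlushRegionProcedure pid=82 to the region server, and reports both as SUCCESS just below. A minimal sketch of the Admin call that triggers this sequence, assuming default client configuration, is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the client-side flush request behind the FlushTableProcedure
// entries above. The test harness, not this snippet, issued the actual call.
public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Flushes every region of the table; returns once the master
            // reports the flush procedure as finished.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}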
2024-11-20T13:24:48,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-20T13:24:48,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-20T13:24:48,596 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-20T13:24:48,597 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 855 msec 2024-11-20T13:24:48,599 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 877 msec 2024-11-20T13:24:48,655 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T13:24:48,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:48,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:48,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:48,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:48,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:48,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:48,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:48,680 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/ef3ae4cb3898428eaff2e35e72607184 is 50, key is test_row_0/A:col10/1732109088648/Put/seqid=0 2024-11-20T13:24:48,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:48,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109148696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:48,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:48,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109148696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:48,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:48,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109148696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:48,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:48,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109148696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:48,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:48,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109148699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:48,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742112_1288 (size=12151) 2024-11-20T13:24:48,730 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/ef3ae4cb3898428eaff2e35e72607184 2024-11-20T13:24:48,754 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/e6cdd6097db14214ae3119ba377629f8 is 50, key is test_row_0/B:col10/1732109088648/Put/seqid=0 2024-11-20T13:24:48,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742113_1289 (size=12151) 2024-11-20T13:24:48,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:48,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109148811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:48,815 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:48,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109148811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:48,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T13:24:48,831 INFO [Thread-1131 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-20T13:24:48,838 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:24:48,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-20T13:24:48,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T13:24:48,844 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:24:48,845 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:24:48,845 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:24:48,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T13:24:48,997 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:48,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T13:24:48,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
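The flush activity in this stretch of the log is driven through the HBase Admin API (the client future above reports "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed", and each request shows up on the master as a FlushTableProcedure). A minimal sketch of issuing such a flush from a client, assuming a default client configuration; only the table name is taken from the log:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch of the Admin-driven flush seen in the log above.
// Connection settings are assumptions; only the table name comes from the log.
public final class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // On this build the call surfaces as a FlushTableProcedure on the master, and the
      // client polls for completion ("Checking to see if procedure is done pid=...").
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```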
2024-11-20T13:24:48,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:48,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:48,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:48,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:48,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:49,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:49,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109149017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:49,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:49,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109149020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:49,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T13:24:49,151 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:49,156 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T13:24:49,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:49,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:49,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:49,161 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
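The repeated RegionTooBusyException warnings above come from HRegion.checkResources, which blocks writes once the region's memstore passes the flush size multiplied by the block multiplier (512.0 K in this log). A small sketch of computing that threshold from the standard configuration keys; the defaults shown are assumptions about an out-of-the-box setup, and the low limit here suggests the test lowered the flush size well below the usual 128 MB:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of how the "Over memstore limit" threshold in the warnings above is derived:
// updates to a region are rejected once its memstore exceeds flush-size * block-multiplier.
final class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    long blockingLimit = flushSize * multiplier;  // 512 K in this test run
    System.out.println("Region updates block above " + blockingLimit + " bytes");
  }
}
```

Once the flush in progress commits its store files and releases memstore, the rejected Mutate calls stop, which is what the later part of the log shows.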
2024-11-20T13:24:49,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:49,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:49,208 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/e6cdd6097db14214ae3119ba377629f8 2024-11-20T13:24:49,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:49,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109149203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:49,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:49,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109149204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:49,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:49,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109149204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:49,218 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/1e6e7f9e5d5c44fe86a70d1910fd0113 is 50, key is test_row_0/C:col10/1732109088648/Put/seqid=0 2024-11-20T13:24:49,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742114_1290 (size=12151) 2024-11-20T13:24:49,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:49,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109149328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:49,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:49,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109149330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:49,337 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:49,340 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T13:24:49,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
2024-11-20T13:24:49,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:49,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:49,340 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:49,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:49,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:49,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T13:24:49,493 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:49,493 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T13:24:49,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
2024-11-20T13:24:49,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:49,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:49,494 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:49,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:49,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:49,646 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:49,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T13:24:49,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:49,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
as already flushing 2024-11-20T13:24:49,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:49,648 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:49,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:49,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:49,665 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/1e6e7f9e5d5c44fe86a70d1910fd0113 2024-11-20T13:24:49,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/ef3ae4cb3898428eaff2e35e72607184 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/ef3ae4cb3898428eaff2e35e72607184 2024-11-20T13:24:49,730 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/ef3ae4cb3898428eaff2e35e72607184, entries=150, sequenceid=235, filesize=11.9 K 2024-11-20T13:24:49,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/e6cdd6097db14214ae3119ba377629f8 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e6cdd6097db14214ae3119ba377629f8 2024-11-20T13:24:49,747 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e6cdd6097db14214ae3119ba377629f8, entries=150, sequenceid=235, filesize=11.9 K 2024-11-20T13:24:49,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/1e6e7f9e5d5c44fe86a70d1910fd0113 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/1e6e7f9e5d5c44fe86a70d1910fd0113 2024-11-20T13:24:49,769 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/1e6e7f9e5d5c44fe86a70d1910fd0113, entries=150, sequenceid=235, filesize=11.9 K 2024-11-20T13:24:49,770 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 7b01f50ebe5529b12faadacb91472f69 in 1115ms, sequenceid=235, compaction requested=true 2024-11-20T13:24:49,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:49,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:24:49,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:49,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:24:49,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T13:24:49,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:24:49,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T13:24:49,771 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:49,771 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:49,776 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:49,776 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/A is initiating minor 
compaction (all files) 2024-11-20T13:24:49,777 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/A in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:49,777 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/857cc02eb2ee4e4ab4c4af5f3d33476f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/b9194e444eac46f8ba2b9db79dc53c8f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/ef3ae4cb3898428eaff2e35e72607184] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=36.0 K 2024-11-20T13:24:49,777 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:49,777 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/C is initiating minor compaction (all files) 2024-11-20T13:24:49,777 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/C in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
2024-11-20T13:24:49,777 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/04bfbb12377744a8a7609cfdec0dd02b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/e09bb48bba174575a98d0d89224b5ac2, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/1e6e7f9e5d5c44fe86a70d1910fd0113] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=36.0 K 2024-11-20T13:24:49,778 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 04bfbb12377744a8a7609cfdec0dd02b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732109086103 2024-11-20T13:24:49,779 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 857cc02eb2ee4e4ab4c4af5f3d33476f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732109086103 2024-11-20T13:24:49,779 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting e09bb48bba174575a98d0d89224b5ac2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732109086483 2024-11-20T13:24:49,784 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9194e444eac46f8ba2b9db79dc53c8f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732109086483 2024-11-20T13:24:49,784 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e6e7f9e5d5c44fe86a70d1910fd0113, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732109088029 2024-11-20T13:24:49,785 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef3ae4cb3898428eaff2e35e72607184, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732109088029 2024-11-20T13:24:49,807 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#A#compaction#246 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:49,808 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/df4161c9d46c40779028f986f297ca0e is 50, key is test_row_0/A:col10/1732109088648/Put/seqid=0 2024-11-20T13:24:49,812 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:49,816 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T13:24:49,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:49,816 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T13:24:49,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:49,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:49,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:49,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:49,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:49,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:49,818 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#C#compaction#247 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:49,819 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/33ae2690beab4ffe90868583e3b311e7 is 50, key is test_row_0/C:col10/1732109088648/Put/seqid=0 2024-11-20T13:24:49,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:49,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:49,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/411a7f067292426886148bbcf9e93692 is 50, key is test_row_0/A:col10/1732109088689/Put/seqid=0 2024-11-20T13:24:49,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742115_1291 (size=12697) 2024-11-20T13:24:49,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T13:24:49,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742116_1292 (size=12697) 2024-11-20T13:24:49,955 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/df4161c9d46c40779028f986f297ca0e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/df4161c9d46c40779028f986f297ca0e 2024-11-20T13:24:49,972 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/A of 7b01f50ebe5529b12faadacb91472f69 into df4161c9d46c40779028f986f297ca0e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:49,972 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:49,972 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/A, priority=13, startTime=1732109089770; duration=0sec 2024-11-20T13:24:49,973 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:49,973 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:A 2024-11-20T13:24:49,973 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:49,975 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:49,975 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/B is initiating minor compaction (all files) 2024-11-20T13:24:49,975 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/B in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:49,975 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/1a3851dad2574233b06bf8efd0e0809f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/a9404fb9f3304b119fe5ee71c6b21a6e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e6cdd6097db14214ae3119ba377629f8] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=36.0 K 2024-11-20T13:24:49,976 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a3851dad2574233b06bf8efd0e0809f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732109086103 2024-11-20T13:24:49,976 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting a9404fb9f3304b119fe5ee71c6b21a6e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732109086483 2024-11-20T13:24:49,977 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6cdd6097db14214ae3119ba377629f8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732109088029 2024-11-20T13:24:49,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): 
Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:49,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109149971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:49,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:49,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109149973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:49,999 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#B#compaction#249 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:49,999 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/e264b278673b4600bf2cf9ead77b09a4 is 50, key is test_row_0/B:col10/1732109088648/Put/seqid=0 2024-11-20T13:24:50,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742117_1293 (size=12151) 2024-11-20T13:24:50,010 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/411a7f067292426886148bbcf9e93692 2024-11-20T13:24:50,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742118_1294 (size=12697) 2024-11-20T13:24:50,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/f780ac07874f44b68ce3ea717e6dd486 is 50, key is test_row_0/B:col10/1732109088689/Put/seqid=0 2024-11-20T13:24:50,076 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/e264b278673b4600bf2cf9ead77b09a4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e264b278673b4600bf2cf9ead77b09a4 2024-11-20T13:24:50,084 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/B of 
7b01f50ebe5529b12faadacb91472f69 into e264b278673b4600bf2cf9ead77b09a4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:24:50,084 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:50,084 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/B, priority=13, startTime=1732109089771; duration=0sec 2024-11-20T13:24:50,085 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:50,085 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:B 2024-11-20T13:24:50,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:50,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109150084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:50,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:50,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109150085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:50,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742119_1295 (size=12151) 2024-11-20T13:24:50,100 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/f780ac07874f44b68ce3ea717e6dd486 2024-11-20T13:24:50,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/78773de2c9dd454d80caa3c62172107c is 50, key is test_row_0/C:col10/1732109088689/Put/seqid=0 2024-11-20T13:24:50,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742120_1296 (size=12151) 2024-11-20T13:24:50,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:50,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:50,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109150216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:50,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109150216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:50,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:50,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109150240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:50,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:50,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109150290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:50,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:50,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109150292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:50,358 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/33ae2690beab4ffe90868583e3b311e7 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/33ae2690beab4ffe90868583e3b311e7 2024-11-20T13:24:50,365 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/C of 7b01f50ebe5529b12faadacb91472f69 into 33ae2690beab4ffe90868583e3b311e7(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:50,365 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:50,365 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/C, priority=13, startTime=1732109089771; duration=0sec 2024-11-20T13:24:50,365 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:50,367 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:C 2024-11-20T13:24:50,563 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/78773de2c9dd454d80caa3c62172107c 2024-11-20T13:24:50,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/411a7f067292426886148bbcf9e93692 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/411a7f067292426886148bbcf9e93692 2024-11-20T13:24:50,576 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/411a7f067292426886148bbcf9e93692, entries=150, sequenceid=245, filesize=11.9 K 2024-11-20T13:24:50,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/f780ac07874f44b68ce3ea717e6dd486 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/f780ac07874f44b68ce3ea717e6dd486 2024-11-20T13:24:50,583 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/f780ac07874f44b68ce3ea717e6dd486, entries=150, sequenceid=245, filesize=11.9 K 2024-11-20T13:24:50,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/78773de2c9dd454d80caa3c62172107c as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/78773de2c9dd454d80caa3c62172107c 2024-11-20T13:24:50,592 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/78773de2c9dd454d80caa3c62172107c, entries=150, sequenceid=245, filesize=11.9 K 2024-11-20T13:24:50,596 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 7b01f50ebe5529b12faadacb91472f69 in 780ms, sequenceid=245, compaction requested=false 2024-11-20T13:24:50,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:50,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:50,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-20T13:24:50,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-20T13:24:50,599 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-20T13:24:50,599 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7530 sec 2024-11-20T13:24:50,602 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.7630 sec 2024-11-20T13:24:50,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:50,608 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T13:24:50,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:50,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:50,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:50,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:50,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:50,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:50,622 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/d44542d242f548a9abc9456e3b09a281 is 50, key is test_row_0/A:col10/1732109090602/Put/seqid=0 2024-11-20T13:24:50,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:50,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109150646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:50,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:50,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109150649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:50,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742121_1297 (size=12301) 2024-11-20T13:24:50,677 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/d44542d242f548a9abc9456e3b09a281 2024-11-20T13:24:50,746 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/d6c9c41019de43ddb79e95c755fb9ec3 is 50, key is test_row_0/B:col10/1732109090602/Put/seqid=0 2024-11-20T13:24:50,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:50,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109150755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:50,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:50,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109150765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:50,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742122_1298 (size=12301) 2024-11-20T13:24:50,801 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/d6c9c41019de43ddb79e95c755fb9ec3 2024-11-20T13:24:50,814 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/3bdb90dd3d3e46799ab7b4d1f133cccd is 50, key is test_row_0/C:col10/1732109090602/Put/seqid=0 2024-11-20T13:24:50,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742123_1299 (size=12301) 2024-11-20T13:24:50,880 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/3bdb90dd3d3e46799ab7b4d1f133cccd 2024-11-20T13:24:50,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/d44542d242f548a9abc9456e3b09a281 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d44542d242f548a9abc9456e3b09a281 2024-11-20T13:24:50,894 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d44542d242f548a9abc9456e3b09a281, entries=150, sequenceid=276, filesize=12.0 K 2024-11-20T13:24:50,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/d6c9c41019de43ddb79e95c755fb9ec3 as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/d6c9c41019de43ddb79e95c755fb9ec3 2024-11-20T13:24:50,902 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/d6c9c41019de43ddb79e95c755fb9ec3, entries=150, sequenceid=276, filesize=12.0 K 2024-11-20T13:24:50,903 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/3bdb90dd3d3e46799ab7b4d1f133cccd as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/3bdb90dd3d3e46799ab7b4d1f133cccd 2024-11-20T13:24:50,910 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/3bdb90dd3d3e46799ab7b4d1f133cccd, entries=150, sequenceid=276, filesize=12.0 K 2024-11-20T13:24:50,916 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 7b01f50ebe5529b12faadacb91472f69 in 308ms, sequenceid=276, compaction requested=true 2024-11-20T13:24:50,916 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:50,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:24:50,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:50,916 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:50,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:24:50,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:50,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:24:50,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T13:24:50,917 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:50,918 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:50,918 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/B is initiating minor compaction (all files) 2024-11-20T13:24:50,918 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/B in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:50,918 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e264b278673b4600bf2cf9ead77b09a4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/f780ac07874f44b68ce3ea717e6dd486, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/d6c9c41019de43ddb79e95c755fb9ec3] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=36.3 K 2024-11-20T13:24:50,919 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:50,919 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/A is initiating minor compaction (all files) 2024-11-20T13:24:50,919 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/A in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
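The "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" entries above come from the store-file-count thresholds the region server consults when picking a minor compaction. The sketch below is illustrative only: the property values are the stock defaults that happen to match those figures, not values read from this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is considered
        // ("3 eligible" in the selection lines above).
        conf.setInt("hbase.hstore.compactionThreshold", 3);
        // Store-file count at which further flushes are blocked ("16 blocking" above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        // Upper bound on the number of files merged by one minor compaction (assumed default).
        conf.setInt("hbase.hstore.compaction.max", 10);
        System.out.println("compactionThreshold = "
                + conf.getInt("hbase.hstore.compactionThreshold", -1));
    }
}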
2024-11-20T13:24:50,919 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/df4161c9d46c40779028f986f297ca0e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/411a7f067292426886148bbcf9e93692, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d44542d242f548a9abc9456e3b09a281] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=36.3 K 2024-11-20T13:24:50,919 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting df4161c9d46c40779028f986f297ca0e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732109088029 2024-11-20T13:24:50,919 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting e264b278673b4600bf2cf9ead77b09a4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732109088029 2024-11-20T13:24:50,920 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 411a7f067292426886148bbcf9e93692, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732109088689 2024-11-20T13:24:50,920 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting f780ac07874f44b68ce3ea717e6dd486, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732109088689 2024-11-20T13:24:50,920 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting d6c9c41019de43ddb79e95c755fb9ec3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732109089970 2024-11-20T13:24:50,921 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting d44542d242f548a9abc9456e3b09a281, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732109089970 2024-11-20T13:24:50,942 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#B#compaction#255 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:50,942 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/3946f9ceec274b9aa1f43ca4caaa063d is 50, key is test_row_0/B:col10/1732109090602/Put/seqid=0 2024-11-20T13:24:50,950 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#A#compaction#256 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:50,951 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/44e8a51c93284f1dba2e6874fa1b39cc is 50, key is test_row_0/A:col10/1732109090602/Put/seqid=0 2024-11-20T13:24:50,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T13:24:50,953 INFO [Thread-1131 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-20T13:24:50,965 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:24:50,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-20T13:24:50,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T13:24:50,976 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:24:50,977 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:24:50,977 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:24:50,990 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T13:24:50,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:50,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:50,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:50,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:50,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:50,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:50,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:51,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742124_1300 (size=12949) 
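The FLUSH operation and FlushTableProcedure (pid=85) recorded above are driven by an admin request against TestAcidGuarantees. A minimal sketch of issuing such a flush from client code, assuming the cluster configuration is picked up from the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Triggers the same kind of flush procedure the master logs above as pid=85;
            // the call waits while the master reports whether the procedure is done.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}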
2024-11-20T13:24:51,019 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/6ed8146ef20d43b3bfea4afbc0bc91e3 is 50, key is test_row_0/A:col10/1732109090989/Put/seqid=0 2024-11-20T13:24:51,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742125_1301 (size=12949) 2024-11-20T13:24:51,048 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/3946f9ceec274b9aa1f43ca4caaa063d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/3946f9ceec274b9aa1f43ca4caaa063d 2024-11-20T13:24:51,072 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/44e8a51c93284f1dba2e6874fa1b39cc as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/44e8a51c93284f1dba2e6874fa1b39cc 2024-11-20T13:24:51,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T13:24:51,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742126_1302 (size=17181) 2024-11-20T13:24:51,120 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/B of 7b01f50ebe5529b12faadacb91472f69 into 3946f9ceec274b9aa1f43ca4caaa063d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:51,120 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:51,120 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/B, priority=13, startTime=1732109090916; duration=0sec 2024-11-20T13:24:51,120 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:51,121 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:B 2024-11-20T13:24:51,121 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:51,123 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:51,123 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/C is initiating minor compaction (all files) 2024-11-20T13:24:51,123 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/C in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:51,123 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/33ae2690beab4ffe90868583e3b311e7, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/78773de2c9dd454d80caa3c62172107c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/3bdb90dd3d3e46799ab7b4d1f133cccd] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=36.3 K 2024-11-20T13:24:51,124 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 33ae2690beab4ffe90868583e3b311e7, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732109088029 2024-11-20T13:24:51,124 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 78773de2c9dd454d80caa3c62172107c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732109088689 2024-11-20T13:24:51,125 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bdb90dd3d3e46799ab7b4d1f133cccd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732109089970 2024-11-20T13:24:51,132 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in 7b01f50ebe5529b12faadacb91472f69/A of 7b01f50ebe5529b12faadacb91472f69 into 44e8a51c93284f1dba2e6874fa1b39cc(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:24:51,132 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:51,132 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/A, priority=13, startTime=1732109090916; duration=0sec 2024-11-20T13:24:51,132 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:51,132 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:A 2024-11-20T13:24:51,134 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:51,134 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T13:24:51,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:51,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:51,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:51,135 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
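The recurring "Over memstore limit=512.0 K" RegionTooBusyException entries come from HRegion.checkResources() rejecting writes once a region's memstore passes its blocking size, which is the configured flush size multiplied by the block multiplier. The sketch below only illustrates that relationship; the 128 KB flush size is an assumed example that, combined with the default multiplier of 4, yields the 512 K limit seen here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed example value; the test presumably configures a similarly small flush size.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Default multiplier; writes are blocked once the memstore exceeds flush size * multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
        // Prints "Writes block above ~512 K per region" for these assumed values.
        System.out.println("Writes block above ~" + blockingLimit / 1024 + " K per region");
    }
}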
2024-11-20T13:24:51,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:51,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:51,175 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#C#compaction#258 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:51,176 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/60a6b32bd85d45d8b17d2f7df3f7e749 is 50, key is test_row_0/C:col10/1732109090602/Put/seqid=0 2024-11-20T13:24:51,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742127_1303 (size=12949) 2024-11-20T13:24:51,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:51,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109151223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:51,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:51,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109151226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:51,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T13:24:51,290 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:51,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T13:24:51,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:51,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:51,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:51,291 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:51,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:51,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:51,294 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43647 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=5ef453f0fbb6,46739,1732109006137, table=TestAcidGuarantees, region=7b01f50ebe5529b12faadacb91472f69. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-20T13:24:51,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:51,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109151328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:51,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:51,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109151332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:51,448 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:51,454 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T13:24:51,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:51,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:51,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:51,455 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:51,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:51,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:51,521 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/6ed8146ef20d43b3bfea4afbc0bc91e3 2024-11-20T13:24:51,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:51,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109151536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:51,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:51,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109151538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:51,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T13:24:51,578 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/f89e8a2942964fe3a3912bfa8d58607c is 50, key is test_row_0/B:col10/1732109090989/Put/seqid=0 2024-11-20T13:24:51,608 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:51,612 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T13:24:51,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:51,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:51,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:51,612 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:51,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:51,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:51,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742128_1304 (size=12301) 2024-11-20T13:24:51,646 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/60a6b32bd85d45d8b17d2f7df3f7e749 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/60a6b32bd85d45d8b17d2f7df3f7e749 2024-11-20T13:24:51,697 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/C of 7b01f50ebe5529b12faadacb91472f69 into 60a6b32bd85d45d8b17d2f7df3f7e749(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:51,697 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:51,697 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/C, priority=13, startTime=1732109090917; duration=0sec 2024-11-20T13:24:51,697 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:51,697 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:C 2024-11-20T13:24:51,772 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:51,772 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T13:24:51,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:51,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:51,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:51,773 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:51,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:51,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:51,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:51,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109151844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:51,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:51,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109151845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:51,932 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:51,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T13:24:51,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:51,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:51,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:51,934 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:51,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:51,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:52,068 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/f89e8a2942964fe3a3912bfa8d58607c 2024-11-20T13:24:52,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T13:24:52,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/77442a235cf548fb9cf240a091f8935c is 50, key is test_row_0/C:col10/1732109090989/Put/seqid=0 2024-11-20T13:24:52,102 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:52,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T13:24:52,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:52,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:52,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:52,113 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:52,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:52,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:52,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:52,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109152228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:52,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742129_1305 (size=12301) 2024-11-20T13:24:52,232 DEBUG [Thread-1127 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4179 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:24:52,248 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/77442a235cf548fb9cf240a091f8935c 2024-11-20T13:24:52,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:52,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109152234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:52,256 DEBUG [Thread-1125 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4196 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:24:52,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:52,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109152264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:52,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/6ed8146ef20d43b3bfea4afbc0bc91e3 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/6ed8146ef20d43b3bfea4afbc0bc91e3 2024-11-20T13:24:52,274 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:52,275 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T13:24:52,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:52,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:52,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
2024-11-20T13:24:52,276 DEBUG [Thread-1129 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4223 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:24:52,276 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:52,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:52,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:52,302 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/6ed8146ef20d43b3bfea4afbc0bc91e3, entries=250, sequenceid=288, filesize=16.8 K 2024-11-20T13:24:52,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/f89e8a2942964fe3a3912bfa8d58607c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/f89e8a2942964fe3a3912bfa8d58607c 2024-11-20T13:24:52,315 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/f89e8a2942964fe3a3912bfa8d58607c, entries=150, sequenceid=288, filesize=12.0 K 2024-11-20T13:24:52,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/77442a235cf548fb9cf240a091f8935c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/77442a235cf548fb9cf240a091f8935c 2024-11-20T13:24:52,325 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/77442a235cf548fb9cf240a091f8935c, entries=150, sequenceid=288, filesize=12.0 K 2024-11-20T13:24:52,328 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 7b01f50ebe5529b12faadacb91472f69 in 1338ms, sequenceid=288, compaction requested=false 2024-11-20T13:24:52,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:52,358 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T13:24:52,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:52,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:52,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:52,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:52,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:52,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:52,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested 
on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:52,378 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/72a6aca2f324409bb92d9bf338ff8561 is 50, key is test_row_0/A:col10/1732109092355/Put/seqid=0 2024-11-20T13:24:52,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:52,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109152411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:52,420 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:52,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109152414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:52,432 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:52,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742130_1306 (size=14741) 2024-11-20T13:24:52,436 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T13:24:52,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:52,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:52,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:52,436 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:52,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:52,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:52,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:52,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109152520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:52,529 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:52,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109152525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:52,600 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:52,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T13:24:52,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:52,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:52,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:52,608 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:52,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:52,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:52,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:52,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109152736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:52,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:52,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109152740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:52,761 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:52,762 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T13:24:52,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:52,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:52,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:52,762 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:52,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:52,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:52,833 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/72a6aca2f324409bb92d9bf338ff8561 2024-11-20T13:24:52,865 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/5d4691fbd9f047c3bad2fc3674d68211 is 50, key is test_row_0/B:col10/1732109092355/Put/seqid=0 2024-11-20T13:24:52,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742131_1307 (size=12301) 2024-11-20T13:24:52,915 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:52,915 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T13:24:52,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:52,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:52,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:52,916 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:52,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:52,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:53,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:53,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109153051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:53,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:53,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109153048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:53,080 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:53,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T13:24:53,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:53,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:53,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:53,082 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:53,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:53,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:53,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T13:24:53,235 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:53,236 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T13:24:53,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:53,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:53,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:53,236 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:53,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:53,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:53,311 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/5d4691fbd9f047c3bad2fc3674d68211 2024-11-20T13:24:53,338 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/fb8c1da388924f30a83ae4dce611b0d4 is 50, key is test_row_0/C:col10/1732109092355/Put/seqid=0 2024-11-20T13:24:53,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742132_1308 (size=12301) 2024-11-20T13:24:53,377 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/fb8c1da388924f30a83ae4dce611b0d4 2024-11-20T13:24:53,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/72a6aca2f324409bb92d9bf338ff8561 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/72a6aca2f324409bb92d9bf338ff8561 2024-11-20T13:24:53,392 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/72a6aca2f324409bb92d9bf338ff8561, entries=200, sequenceid=316, filesize=14.4 K 2024-11-20T13:24:53,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/5d4691fbd9f047c3bad2fc3674d68211 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/5d4691fbd9f047c3bad2fc3674d68211 2024-11-20T13:24:53,394 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:53,397 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T13:24:53,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:53,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
as already flushing 2024-11-20T13:24:53,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:53,398 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:53,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:53,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:53,402 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/5d4691fbd9f047c3bad2fc3674d68211, entries=150, sequenceid=316, filesize=12.0 K 2024-11-20T13:24:53,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/fb8c1da388924f30a83ae4dce611b0d4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/fb8c1da388924f30a83ae4dce611b0d4 2024-11-20T13:24:53,412 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/fb8c1da388924f30a83ae4dce611b0d4, entries=150, sequenceid=316, filesize=12.0 K 2024-11-20T13:24:53,413 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 7b01f50ebe5529b12faadacb91472f69 in 1055ms, sequenceid=316, compaction requested=true 2024-11-20T13:24:53,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:53,414 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:53,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add 
compact mark for store 7b01f50ebe5529b12faadacb91472f69:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:24:53,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:53,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:24:53,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:53,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:24:53,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T13:24:53,415 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 44871 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:53,415 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/A is initiating minor compaction (all files) 2024-11-20T13:24:53,415 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/A in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
2024-11-20T13:24:53,415 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/44e8a51c93284f1dba2e6874fa1b39cc, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/6ed8146ef20d43b3bfea4afbc0bc91e3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/72a6aca2f324409bb92d9bf338ff8561] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=43.8 K 2024-11-20T13:24:53,416 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:53,420 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 44e8a51c93284f1dba2e6874fa1b39cc, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732109089970 2024-11-20T13:24:53,424 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ed8146ef20d43b3bfea4afbc0bc91e3, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732109090645 2024-11-20T13:24:53,426 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:53,426 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/B is initiating minor compaction (all files) 2024-11-20T13:24:53,426 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/B in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
2024-11-20T13:24:53,426 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/3946f9ceec274b9aa1f43ca4caaa063d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/f89e8a2942964fe3a3912bfa8d58607c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/5d4691fbd9f047c3bad2fc3674d68211] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=36.7 K 2024-11-20T13:24:53,426 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 72a6aca2f324409bb92d9bf338ff8561, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732109091195 2024-11-20T13:24:53,428 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3946f9ceec274b9aa1f43ca4caaa063d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732109089970 2024-11-20T13:24:53,429 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting f89e8a2942964fe3a3912bfa8d58607c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732109090645 2024-11-20T13:24:53,430 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d4691fbd9f047c3bad2fc3674d68211, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732109091195 2024-11-20T13:24:53,466 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#A#compaction#264 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:53,467 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/cb20215100fc45d59ed664345e3fce51 is 50, key is test_row_0/A:col10/1732109092355/Put/seqid=0 2024-11-20T13:24:53,484 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#B#compaction#265 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:53,485 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/a9b59dcc82f6485e88238ba31a2d6742 is 50, key is test_row_0/B:col10/1732109092355/Put/seqid=0 2024-11-20T13:24:53,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742133_1309 (size=13051) 2024-11-20T13:24:53,553 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:53,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T13:24:53,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:53,556 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T13:24:53,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:53,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:53,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:53,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:53,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:53,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:53,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
as already flushing 2024-11-20T13:24:53,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:53,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742134_1310 (size=13051) 2024-11-20T13:24:53,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/a08dcc8b9be64194aae31f6ace286afe is 50, key is test_row_0/A:col10/1732109092410/Put/seqid=0 2024-11-20T13:24:53,609 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/a9b59dcc82f6485e88238ba31a2d6742 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/a9b59dcc82f6485e88238ba31a2d6742 2024-11-20T13:24:53,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742135_1311 (size=12301) 2024-11-20T13:24:53,655 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/B of 7b01f50ebe5529b12faadacb91472f69 into a9b59dcc82f6485e88238ba31a2d6742(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:53,655 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:53,655 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/B, priority=13, startTime=1732109093415; duration=0sec 2024-11-20T13:24:53,655 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:53,655 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:B 2024-11-20T13:24:53,655 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:53,656 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/a08dcc8b9be64194aae31f6ace286afe 2024-11-20T13:24:53,670 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T13:24:53,676 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:53,676 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/C is initiating minor compaction (all files) 2024-11-20T13:24:53,676 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/C in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
2024-11-20T13:24:53,676 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/60a6b32bd85d45d8b17d2f7df3f7e749, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/77442a235cf548fb9cf240a091f8935c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/fb8c1da388924f30a83ae4dce611b0d4] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=36.7 K 2024-11-20T13:24:53,680 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60a6b32bd85d45d8b17d2f7df3f7e749, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732109089970 2024-11-20T13:24:53,681 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77442a235cf548fb9cf240a091f8935c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732109090645 2024-11-20T13:24:53,682 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb8c1da388924f30a83ae4dce611b0d4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732109091195 2024-11-20T13:24:53,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/64ac7ee437ab4a379093975396a6bad5 is 50, key is test_row_0/B:col10/1732109092410/Put/seqid=0 2024-11-20T13:24:53,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:53,734 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:53,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109153719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:53,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109153719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:53,774 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#C#compaction#268 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:53,775 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/91e2b692a9a347d882332ccdea270619 is 50, key is test_row_0/C:col10/1732109092355/Put/seqid=0 2024-11-20T13:24:53,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742136_1312 (size=12301) 2024-11-20T13:24:53,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742137_1313 (size=13051) 2024-11-20T13:24:53,823 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/91e2b692a9a347d882332ccdea270619 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/91e2b692a9a347d882332ccdea270619 2024-11-20T13:24:53,828 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/C of 7b01f50ebe5529b12faadacb91472f69 into 91e2b692a9a347d882332ccdea270619(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:24:53,828 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:53,828 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/C, priority=13, startTime=1732109093415; duration=0sec 2024-11-20T13:24:53,828 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:53,828 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:C 2024-11-20T13:24:53,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:53,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109153835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:53,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:53,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109153836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:53,960 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/cb20215100fc45d59ed664345e3fce51 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/cb20215100fc45d59ed664345e3fce51 2024-11-20T13:24:53,966 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/A of 7b01f50ebe5529b12faadacb91472f69 into cb20215100fc45d59ed664345e3fce51(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:24:53,966 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:53,966 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/A, priority=13, startTime=1732109093414; duration=0sec 2024-11-20T13:24:53,966 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:53,966 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:A 2024-11-20T13:24:54,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:54,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109154040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:54,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:54,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109154042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:54,186 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/64ac7ee437ab4a379093975396a6bad5 2024-11-20T13:24:54,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/c9bdf94dbf0542cd8aa7eb5a1018665d is 50, key is test_row_0/C:col10/1732109092410/Put/seqid=0 2024-11-20T13:24:54,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742138_1314 (size=12301) 2024-11-20T13:24:54,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:54,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109154345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:54,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:54,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109154347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:54,633 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/c9bdf94dbf0542cd8aa7eb5a1018665d 2024-11-20T13:24:54,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/a08dcc8b9be64194aae31f6ace286afe as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/a08dcc8b9be64194aae31f6ace286afe 2024-11-20T13:24:54,677 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/a08dcc8b9be64194aae31f6ace286afe, entries=150, sequenceid=327, filesize=12.0 K 2024-11-20T13:24:54,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/64ac7ee437ab4a379093975396a6bad5 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/64ac7ee437ab4a379093975396a6bad5 2024-11-20T13:24:54,709 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/64ac7ee437ab4a379093975396a6bad5, entries=150, sequenceid=327, filesize=12.0 K 2024-11-20T13:24:54,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/c9bdf94dbf0542cd8aa7eb5a1018665d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c9bdf94dbf0542cd8aa7eb5a1018665d 2024-11-20T13:24:54,732 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c9bdf94dbf0542cd8aa7eb5a1018665d, entries=150, sequenceid=327, filesize=12.0 K 2024-11-20T13:24:54,737 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7b01f50ebe5529b12faadacb91472f69 in 1181ms, sequenceid=327, compaction requested=false 2024-11-20T13:24:54,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:54,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:54,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-20T13:24:54,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-20T13:24:54,746 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-20T13:24:54,746 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.7660 sec 2024-11-20T13:24:54,747 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 3.7810 sec 2024-11-20T13:24:54,862 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T13:24:54,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:54,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:54,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:54,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:54,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:54,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-20T13:24:54,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:54,890 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/164a759f33e74e55a852ac63d31c8149 is 50, key is test_row_0/A:col10/1732109093710/Put/seqid=0 2024-11-20T13:24:54,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:54,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109154914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:54,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:54,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109154920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:54,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742139_1315 (size=12301) 2024-11-20T13:24:55,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:55,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109155027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:55,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:55,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109155036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:55,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T13:24:55,089 INFO [Thread-1131 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-20T13:24:55,096 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:24:55,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-11-20T13:24:55,098 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:24:55,098 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:24:55,098 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:24:55,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T13:24:55,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T13:24:55,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:55,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109155236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:55,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:55,250 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:55,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109155245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:55,251 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-20T13:24:55,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:55,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:55,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:55,251 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:24:55,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:55,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:55,340 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/164a759f33e74e55a852ac63d31c8149 2024-11-20T13:24:55,357 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/d56ad203a0274d2299b61a34328321ff is 50, key is test_row_0/B:col10/1732109093710/Put/seqid=0 2024-11-20T13:24:55,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742140_1316 (size=12301) 2024-11-20T13:24:55,379 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/d56ad203a0274d2299b61a34328321ff 2024-11-20T13:24:55,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T13:24:55,404 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:55,404 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/daf99cd702de41bdab294f86e975f899 is 50, key is test_row_0/C:col10/1732109093710/Put/seqid=0 2024-11-20T13:24:55,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-20T13:24:55,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:55,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:55,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
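The "NOT flushing ... as already flushing" failures above happen because the master-driven FlushRegionProcedure (pid=88) raced with the flush the MemStoreFlusher had already started for the same region. The table-level flush that spawned that procedure corresponds to a client-side Admin flush request; a minimal sketch of such a call, assuming a reachable cluster and default connection configuration (only the table name is taken from this log, the rest is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the master to flush every region of the table; in this log the
          // request is executed as a FlushTableProcedure with FlushRegionProcedure
          // subprocedures on the region server.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

If a region is already flushing when the subprocedure arrives, the region server reports the failure seen here and the master retries the subprocedure until the flush can be performed or the region catches up.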
2024-11-20T13:24:55,411 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:55,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:55,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:24:55,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742141_1317 (size=12301) 2024-11-20T13:24:55,434 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/daf99cd702de41bdab294f86e975f899 2024-11-20T13:24:55,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/164a759f33e74e55a852ac63d31c8149 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/164a759f33e74e55a852ac63d31c8149 2024-11-20T13:24:55,462 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/164a759f33e74e55a852ac63d31c8149, entries=150, sequenceid=356, filesize=12.0 K 2024-11-20T13:24:55,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/d56ad203a0274d2299b61a34328321ff as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/d56ad203a0274d2299b61a34328321ff 2024-11-20T13:24:55,482 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/d56ad203a0274d2299b61a34328321ff, entries=150, sequenceid=356, filesize=12.0 K 2024-11-20T13:24:55,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/daf99cd702de41bdab294f86e975f899 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/daf99cd702de41bdab294f86e975f899 2024-11-20T13:24:55,504 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/daf99cd702de41bdab294f86e975f899, entries=150, sequenceid=356, filesize=12.0 K 2024-11-20T13:24:55,506 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 7b01f50ebe5529b12faadacb91472f69 in 644ms, sequenceid=356, compaction requested=true 2024-11-20T13:24:55,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:55,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:24:55,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:55,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:24:55,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T13:24:55,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:24:55,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T13:24:55,506 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:55,506 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:55,512 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:55,513 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/C is initiating minor compaction 
(all files) 2024-11-20T13:24:55,513 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/C in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:55,513 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/91e2b692a9a347d882332ccdea270619, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c9bdf94dbf0542cd8aa7eb5a1018665d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/daf99cd702de41bdab294f86e975f899] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=36.8 K 2024-11-20T13:24:55,513 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:55,513 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/A is initiating minor compaction (all files) 2024-11-20T13:24:55,513 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/A in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
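With three flushed store files per column family, each store becomes eligible for the minor compaction the ExploringCompactionPolicy selects above. These compactions are system-triggered by the flusher, but the same work can be requested and observed from a client; a minimal sketch, assuming an already-open Admin handle for the same table (the polling helper is illustrative, not part of this test):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;

    public final class CompactionHelper {
      // Request a compaction of every store in the table and wait until the
      // region server reports that no compaction is running any more.
      public static void compactAndWait(Admin admin, TableName table) throws Exception {
        admin.compact(table);
        while (admin.getCompactionState(table) != CompactionState.NONE) {
          Thread.sleep(1000); // the CompactSplit threads seen in this log do the actual work
        }
      }
    }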
2024-11-20T13:24:55,513 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 91e2b692a9a347d882332ccdea270619, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732109091195 2024-11-20T13:24:55,513 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/cb20215100fc45d59ed664345e3fce51, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/a08dcc8b9be64194aae31f6ace286afe, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/164a759f33e74e55a852ac63d31c8149] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=36.8 K 2024-11-20T13:24:55,514 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting c9bdf94dbf0542cd8aa7eb5a1018665d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732109092398 2024-11-20T13:24:55,514 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb20215100fc45d59ed664345e3fce51, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732109091195 2024-11-20T13:24:55,516 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting a08dcc8b9be64194aae31f6ace286afe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732109092398 2024-11-20T13:24:55,516 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting daf99cd702de41bdab294f86e975f899, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732109093657 2024-11-20T13:24:55,517 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 164a759f33e74e55a852ac63d31c8149, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732109093657 2024-11-20T13:24:55,543 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#C#compaction#273 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:55,544 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/d245d2a9414b48968daa7d663be9b17d is 50, key is test_row_0/C:col10/1732109093710/Put/seqid=0 2024-11-20T13:24:55,550 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#A#compaction#274 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:55,551 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/7a3c3a8c90ed47fbbb4971dc8686a1e3 is 50, key is test_row_0/A:col10/1732109093710/Put/seqid=0 2024-11-20T13:24:55,565 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:55,566 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-20T13:24:55,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:55,567 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T13:24:55,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. as already flushing 2024-11-20T13:24:55,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:55,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:55,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:55,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:55,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:55,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:55,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:55,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742142_1318 (size=13153) 2024-11-20T13:24:55,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/de704c83e30c4f9cbfa494de04d6fc09 is 50, key is 
test_row_0/A:col10/1732109094875/Put/seqid=0 2024-11-20T13:24:55,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742143_1319 (size=13153) 2024-11-20T13:24:55,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742144_1320 (size=12301) 2024-11-20T13:24:55,650 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/de704c83e30c4f9cbfa494de04d6fc09 2024-11-20T13:24:55,658 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/7a3c3a8c90ed47fbbb4971dc8686a1e3 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/7a3c3a8c90ed47fbbb4971dc8686a1e3 2024-11-20T13:24:55,659 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/d245d2a9414b48968daa7d663be9b17d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/d245d2a9414b48968daa7d663be9b17d 2024-11-20T13:24:55,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/088afca5c2b349969d0dba415ca4f566 is 50, key is test_row_0/B:col10/1732109094875/Put/seqid=0 2024-11-20T13:24:55,669 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/C of 7b01f50ebe5529b12faadacb91472f69 into d245d2a9414b48968daa7d663be9b17d(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:55,669 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:55,669 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/C, priority=13, startTime=1732109095506; duration=0sec 2024-11-20T13:24:55,669 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:55,669 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:C 2024-11-20T13:24:55,669 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:55,670 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:55,671 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/B is initiating minor compaction (all files) 2024-11-20T13:24:55,671 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/A of 7b01f50ebe5529b12faadacb91472f69 into 7a3c3a8c90ed47fbbb4971dc8686a1e3(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:24:55,671 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/B in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
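The recurring RegionTooBusyException ("Over memstore limit=512.0 K") means writes are being rejected because the region's memstore has exceeded its blocking size, which is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; this test runs with a deliberately tiny flush size so that flushes and compactions race with the writer threads. On the client side the exception is retried internally by RpcRetryingCallerImpl, as the retry messages further down show. A minimal sketch of the kind of put these warnings are throttling, with the relevant retry settings spelled out (values illustrative, not read from the test configuration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // These two settings bound how long the client keeps retrying a
        // RegionTooBusyException before giving up.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_2"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          table.put(put); // retried internally while the region is over its memstore limit
        }
      }
    }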
2024-11-20T13:24:55,671 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:55,671 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/A, priority=13, startTime=1732109095506; duration=0sec 2024-11-20T13:24:55,671 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/a9b59dcc82f6485e88238ba31a2d6742, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/64ac7ee437ab4a379093975396a6bad5, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/d56ad203a0274d2299b61a34328321ff] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=36.8 K 2024-11-20T13:24:55,671 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:55,671 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:A 2024-11-20T13:24:55,671 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting a9b59dcc82f6485e88238ba31a2d6742, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732109091195 2024-11-20T13:24:55,672 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 64ac7ee437ab4a379093975396a6bad5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732109092398 2024-11-20T13:24:55,672 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting d56ad203a0274d2299b61a34328321ff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732109093657 2024-11-20T13:24:55,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T13:24:55,708 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#B#compaction#277 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:55,708 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/cfb9115579f94edb993ec6eeba275e23 is 50, key is test_row_0/B:col10/1732109093710/Put/seqid=0 2024-11-20T13:24:55,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742145_1321 (size=12301) 2024-11-20T13:24:55,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:55,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109155728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:55,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:55,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109155729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:55,764 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/088afca5c2b349969d0dba415ca4f566 2024-11-20T13:24:55,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742146_1322 (size=13153) 2024-11-20T13:24:55,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/e9a519e039fc447080189d746b51a031 is 50, key is test_row_0/C:col10/1732109094875/Put/seqid=0 2024-11-20T13:24:55,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742147_1323 (size=12301) 2024-11-20T13:24:55,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:55,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109155851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:55,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:55,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109155851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:55,871 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/cfb9115579f94edb993ec6eeba275e23 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/cfb9115579f94edb993ec6eeba275e23 2024-11-20T13:24:55,961 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/B of 7b01f50ebe5529b12faadacb91472f69 into cfb9115579f94edb993ec6eeba275e23(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:24:55,961 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:55,961 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/B, priority=13, startTime=1732109095506; duration=0sec 2024-11-20T13:24:55,961 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:55,962 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:B 2024-11-20T13:24:56,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:56,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109156058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:56,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:56,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109156058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:56,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T13:24:56,247 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:56,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54058 deadline: 1732109156244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:56,247 DEBUG [Thread-1127 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8195 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:24:56,252 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/e9a519e039fc447080189d746b51a031 2024-11-20T13:24:56,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/de704c83e30c4f9cbfa494de04d6fc09 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/de704c83e30c4f9cbfa494de04d6fc09 2024-11-20T13:24:56,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:56,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54074 deadline: 1732109156264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:56,267 DEBUG [Thread-1125 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8207 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:24:56,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:56,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54088 deadline: 1732109156279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:56,280 DEBUG [Thread-1129 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8227 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:24:56,291 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/de704c83e30c4f9cbfa494de04d6fc09, entries=150, sequenceid=369, filesize=12.0 K 2024-11-20T13:24:56,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/088afca5c2b349969d0dba415ca4f566 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/088afca5c2b349969d0dba415ca4f566 2024-11-20T13:24:56,311 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/088afca5c2b349969d0dba415ca4f566, entries=150, sequenceid=369, filesize=12.0 K 2024-11-20T13:24:56,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/e9a519e039fc447080189d746b51a031 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/e9a519e039fc447080189d746b51a031 2024-11-20T13:24:56,330 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/e9a519e039fc447080189d746b51a031, entries=150, sequenceid=369, filesize=12.0 K 2024-11-20T13:24:56,332 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 7b01f50ebe5529b12faadacb91472f69 in 765ms, sequenceid=369, compaction requested=false 2024-11-20T13:24:56,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:56,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
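The warnings running through this stretch of the log are all one condition: HRegion.checkResources rejects incoming Mutate calls with RegionTooBusyException while the region's memstore is over its blocking limit (512.0 K in this run), and the client's RpcRetryingCallerImpl keeps re-trying the same put (tries=7 of retries=16) until the flush above drains the memstore. As far as I can tell, that blocking limit is the memstore flush size multiplied by the block multiplier, which is why a deliberately small flush size in this test yields the 512.0 K ceiling seen in every stack trace. A minimal client-side sketch follows, assuming standard HBase configuration keys with illustrative values (none of these values are read from this log), showing where the retry counts and the blocking threshold are usually set:

    // Sketch only: the keys are stock HBase settings, the values are assumptions
    // chosen to mirror what this test log appears to be doing.
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TooBusyRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Client retry policy; the log shows RpcRetryingCallerImpl at tries=7 of retries=16.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100);
        // Server-side blocking threshold is roughly flush size x block multiplier;
        // 128 K x 4 = 512 K would match the limit in these stack traces (assumed, not confirmed).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_1"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          try {
            // The client retries internally; once retries are exhausted the failure
            // surfaces as an IOException whose cause chain carries the
            // RegionTooBusyException seen in the server-side warnings above.
            table.put(put);
          } catch (IOException e) {
            // Back off and let flushes/compactions catch up before resubmitting.
          }
        }
      }
    }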
2024-11-20T13:24:56,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-11-20T13:24:56,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-11-20T13:24:56,338 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-20T13:24:56,338 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2380 sec 2024-11-20T13:24:56,341 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.2430 sec 2024-11-20T13:24:56,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:56,366 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T13:24:56,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:56,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:56,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:56,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:56,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:56,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:56,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/7859bc59633d428dbb0faeee0a4782f5 is 50, key is test_row_0/A:col10/1732109095707/Put/seqid=0 2024-11-20T13:24:56,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:56,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109156398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:56,403 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:56,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109156400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:56,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742148_1324 (size=14741) 2024-11-20T13:24:56,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:56,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109156504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:56,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:56,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109156516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:56,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:56,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109156707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:56,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:56,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109156721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:56,838 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/7859bc59633d428dbb0faeee0a4782f5 2024-11-20T13:24:56,859 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/524e889c353f4dfc8480f331aede30d8 is 50, key is test_row_0/B:col10/1732109095707/Put/seqid=0 2024-11-20T13:24:56,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742149_1325 (size=12301) 2024-11-20T13:24:56,883 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/524e889c353f4dfc8480f331aede30d8 2024-11-20T13:24:56,895 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/39f2f6eae5324e39922d1d6188572bf5 is 50, key is test_row_0/C:col10/1732109095707/Put/seqid=0 2024-11-20T13:24:56,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742150_1326 (size=12301) 2024-11-20T13:24:57,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:57,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54130 deadline: 1732109157011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:57,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:24:57,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54108 deadline: 1732109157045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:57,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T13:24:57,204 INFO [Thread-1131 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-20T13:24:57,209 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:24:57,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-11-20T13:24:57,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T13:24:57,230 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:24:57,240 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:24:57,240 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:24:57,244 DEBUG [Thread-1134 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x05bd3f35 to 127.0.0.1:53074 2024-11-20T13:24:57,244 DEBUG [Thread-1132 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3685bc77 to 127.0.0.1:53074 2024-11-20T13:24:57,245 DEBUG [Thread-1134 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:24:57,245 DEBUG [Thread-1132 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:24:57,246 DEBUG [Thread-1140 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7b868ff8 to 127.0.0.1:53074 2024-11-20T13:24:57,246 DEBUG [Thread-1140 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:24:57,249 DEBUG [Thread-1138 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38280ccf to 127.0.0.1:53074 2024-11-20T13:24:57,249 DEBUG [Thread-1138 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:24:57,250 DEBUG [Thread-1136 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2906ae40 to 127.0.0.1:53074 2024-11-20T13:24:57,250 DEBUG [Thread-1136 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:24:57,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T13:24:57,331 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/39f2f6eae5324e39922d1d6188572bf5 2024-11-20T13:24:57,335 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/7859bc59633d428dbb0faeee0a4782f5 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/7859bc59633d428dbb0faeee0a4782f5 2024-11-20T13:24:57,339 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/7859bc59633d428dbb0faeee0a4782f5, entries=200, sequenceid=397, filesize=14.4 K 2024-11-20T13:24:57,340 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/524e889c353f4dfc8480f331aede30d8 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/524e889c353f4dfc8480f331aede30d8 2024-11-20T13:24:57,344 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/524e889c353f4dfc8480f331aede30d8, entries=150, sequenceid=397, filesize=12.0 K 2024-11-20T13:24:57,345 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/39f2f6eae5324e39922d1d6188572bf5 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/39f2f6eae5324e39922d1d6188572bf5 2024-11-20T13:24:57,348 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/39f2f6eae5324e39922d1d6188572bf5, entries=150, sequenceid=397, filesize=12.0 K 2024-11-20T13:24:57,349 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 7b01f50ebe5529b12faadacb91472f69 in 983ms, sequenceid=397, compaction requested=true 2024-11-20T13:24:57,349 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2538): Flush status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:57,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:24:57,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:57,349 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:57,350 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:57,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:24:57,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:57,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b01f50ebe5529b12faadacb91472f69:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:24:57,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:57,351 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:57,351 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:57,351 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/B is initiating minor compaction (all files) 2024-11-20T13:24:57,351 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/A is initiating minor compaction (all files) 2024-11-20T13:24:57,351 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/B in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:57,351 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/A in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
2024-11-20T13:24:57,351 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/7a3c3a8c90ed47fbbb4971dc8686a1e3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/de704c83e30c4f9cbfa494de04d6fc09, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/7859bc59633d428dbb0faeee0a4782f5] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=39.3 K 2024-11-20T13:24:57,351 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/cfb9115579f94edb993ec6eeba275e23, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/088afca5c2b349969d0dba415ca4f566, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/524e889c353f4dfc8480f331aede30d8] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=36.9 K 2024-11-20T13:24:57,352 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a3c3a8c90ed47fbbb4971dc8686a1e3, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732109093657 2024-11-20T13:24:57,352 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting cfb9115579f94edb993ec6eeba275e23, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732109093657 2024-11-20T13:24:57,352 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting de704c83e30c4f9cbfa494de04d6fc09, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1732109094875 2024-11-20T13:24:57,353 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 088afca5c2b349969d0dba415ca4f566, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1732109094875 2024-11-20T13:24:57,353 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7859bc59633d428dbb0faeee0a4782f5, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1732109095707 2024-11-20T13:24:57,353 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 524e889c353f4dfc8480f331aede30d8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1732109095707 2024-11-20T13:24:57,366 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#A#compaction#282 average throughput is 3.28 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:57,366 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/5759f1281898463ab03eeb3106047143 is 50, key is test_row_0/A:col10/1732109095707/Put/seqid=0 2024-11-20T13:24:57,366 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#B#compaction#283 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:57,367 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/ebc873bd9283445e82315215d05248e4 is 50, key is test_row_0/B:col10/1732109095707/Put/seqid=0 2024-11-20T13:24:57,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742151_1327 (size=13255) 2024-11-20T13:24:57,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742152_1328 (size=13255) 2024-11-20T13:24:57,386 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/5759f1281898463ab03eeb3106047143 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/5759f1281898463ab03eeb3106047143 2024-11-20T13:24:57,389 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/ebc873bd9283445e82315215d05248e4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/ebc873bd9283445e82315215d05248e4 2024-11-20T13:24:57,393 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/A of 7b01f50ebe5529b12faadacb91472f69 into 5759f1281898463ab03eeb3106047143(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:57,393 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:57,393 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/A, priority=13, startTime=1732109097349; duration=0sec 2024-11-20T13:24:57,394 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:24:57,394 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:A 2024-11-20T13:24:57,394 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:24:57,396 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:24:57,396 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 7b01f50ebe5529b12faadacb91472f69/C is initiating minor compaction (all files) 2024-11-20T13:24:57,397 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b01f50ebe5529b12faadacb91472f69/C in TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:57,397 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/d245d2a9414b48968daa7d663be9b17d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/e9a519e039fc447080189d746b51a031, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/39f2f6eae5324e39922d1d6188572bf5] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp, totalSize=36.9 K 2024-11-20T13:24:57,398 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting d245d2a9414b48968daa7d663be9b17d, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732109093657 2024-11-20T13:24:57,399 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/B of 7b01f50ebe5529b12faadacb91472f69 into ebc873bd9283445e82315215d05248e4(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:57,399 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:57,399 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/B, priority=13, startTime=1732109097349; duration=0sec 2024-11-20T13:24:57,399 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:57,399 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:B 2024-11-20T13:24:57,400 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting e9a519e039fc447080189d746b51a031, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1732109094875 2024-11-20T13:24:57,400 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39f2f6eae5324e39922d1d6188572bf5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1732109095707 2024-11-20T13:24:57,403 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:24:57,403 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-20T13:24:57,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
2024-11-20T13:24:57,404 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T13:24:57,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:24:57,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:57,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:24:57,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:57,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:24:57,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:24:57,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/f09858aa7ac64444bb65f4c221ffd33a is 50, key is test_row_0/A:col10/1732109096369/Put/seqid=0 2024-11-20T13:24:57,411 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b01f50ebe5529b12faadacb91472f69#C#compaction#285 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:24:57,412 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/24b46217a8fd4c1790ab8c6d31d7ec2d is 50, key is test_row_0/C:col10/1732109095707/Put/seqid=0 2024-11-20T13:24:57,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742153_1329 (size=12301) 2024-11-20T13:24:57,414 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/f09858aa7ac64444bb65f4c221ffd33a 2024-11-20T13:24:57,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742154_1330 (size=13255) 2024-11-20T13:24:57,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/6e27aa73aee44edd8f85ee44a33b56d9 is 50, key is test_row_0/B:col10/1732109096369/Put/seqid=0 2024-11-20T13:24:57,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742155_1331 (size=12301) 2024-11-20T13:24:57,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:24:57,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
as already flushing 2024-11-20T13:24:57,522 DEBUG [Thread-1123 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6b1dacfc to 127.0.0.1:53074 2024-11-20T13:24:57,522 DEBUG [Thread-1123 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:24:57,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T13:24:57,553 DEBUG [Thread-1121 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x24414caa to 127.0.0.1:53074 2024-11-20T13:24:57,553 DEBUG [Thread-1121 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:24:57,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T13:24:57,825 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/6e27aa73aee44edd8f85ee44a33b56d9 2024-11-20T13:24:57,825 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/24b46217a8fd4c1790ab8c6d31d7ec2d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/24b46217a8fd4c1790ab8c6d31d7ec2d 2024-11-20T13:24:57,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/c9467a101b56493b98b67f7d6b5924e5 is 50, key is test_row_0/C:col10/1732109096369/Put/seqid=0 2024-11-20T13:24:57,848 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b01f50ebe5529b12faadacb91472f69/C of 7b01f50ebe5529b12faadacb91472f69 into 24b46217a8fd4c1790ab8c6d31d7ec2d(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:24:57,848 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:57,848 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69., storeName=7b01f50ebe5529b12faadacb91472f69/C, priority=13, startTime=1732109097350; duration=0sec 2024-11-20T13:24:57,848 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:24:57,848 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b01f50ebe5529b12faadacb91472f69:C 2024-11-20T13:24:57,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742156_1332 (size=12301) 2024-11-20T13:24:57,924 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/c9467a101b56493b98b67f7d6b5924e5 2024-11-20T13:24:57,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/f09858aa7ac64444bb65f4c221ffd33a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/f09858aa7ac64444bb65f4c221ffd33a 2024-11-20T13:24:57,963 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/f09858aa7ac64444bb65f4c221ffd33a, entries=150, sequenceid=410, filesize=12.0 K 2024-11-20T13:24:57,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/6e27aa73aee44edd8f85ee44a33b56d9 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/6e27aa73aee44edd8f85ee44a33b56d9 2024-11-20T13:24:57,988 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/6e27aa73aee44edd8f85ee44a33b56d9, entries=150, sequenceid=410, filesize=12.0 K 2024-11-20T13:24:57,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/c9467a101b56493b98b67f7d6b5924e5 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c9467a101b56493b98b67f7d6b5924e5 2024-11-20T13:24:58,025 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c9467a101b56493b98b67f7d6b5924e5, entries=150, sequenceid=410, filesize=12.0 K 2024-11-20T13:24:58,036 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=13.42 KB/13740 for 7b01f50ebe5529b12faadacb91472f69 in 631ms, sequenceid=410, compaction requested=false 2024-11-20T13:24:58,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:24:58,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:24:58,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-11-20T13:24:58,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-11-20T13:24:58,088 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-20T13:24:58,088 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 812 msec 2024-11-20T13:24:58,090 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 880 msec 2024-11-20T13:24:58,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T13:24:58,326 INFO [Thread-1131 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-11-20T13:25:06,275 DEBUG [Thread-1125 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x32d6d0a0 to 127.0.0.1:53074 2024-11-20T13:25:06,275 DEBUG [Thread-1125 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:25:06,314 DEBUG [Thread-1129 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0c8c7dae to 127.0.0.1:53074 2024-11-20T13:25:06,314 DEBUG [Thread-1129 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:25:06,344 DEBUG [Thread-1127 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0417da98 to 127.0.0.1:53074 2024-11-20T13:25:06,344 DEBUG [Thread-1127 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:25:06,344 INFO [Time-limited test {}] 
hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-20T13:25:06,344 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 107 2024-11-20T13:25:06,344 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 141 2024-11-20T13:25:06,344 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 30 2024-11-20T13:25:06,344 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 29 2024-11-20T13:25:06,344 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 18 2024-11-20T13:25:06,344 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T13:25:06,344 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2361 2024-11-20T13:25:06,344 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2267 2024-11-20T13:25:06,344 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2065 2024-11-20T13:25:06,344 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2356 2024-11-20T13:25:06,344 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2234 2024-11-20T13:25:06,344 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T13:25:06,344 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T13:25:06,344 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x65d62a07 to 127.0.0.1:53074 2024-11-20T13:25:06,344 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:25:06,347 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T13:25:06,348 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T13:25:06,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T13:25:06,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T13:25:06,352 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109106352"}]},"ts":"1732109106352"} 2024-11-20T13:25:06,354 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T13:25:06,357 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T13:25:06,357 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T13:25:06,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b01f50ebe5529b12faadacb91472f69, UNASSIGN}] 2024-11-20T13:25:06,360 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b01f50ebe5529b12faadacb91472f69, UNASSIGN 
2024-11-20T13:25:06,361 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=7b01f50ebe5529b12faadacb91472f69, regionState=CLOSING, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:06,362 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T13:25:06,362 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; CloseRegionProcedure 7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137}] 2024-11-20T13:25:06,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T13:25:06,514 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:06,515 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(124): Close 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:25:06,515 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T13:25:06,515 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1681): Closing 7b01f50ebe5529b12faadacb91472f69, disabling compactions & flushes 2024-11-20T13:25:06,515 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:25:06,515 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 2024-11-20T13:25:06,515 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. after waiting 0 ms 2024-11-20T13:25:06,515 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
2024-11-20T13:25:06,515 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(2837): Flushing 7b01f50ebe5529b12faadacb91472f69 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T13:25:06,516 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=A 2024-11-20T13:25:06,516 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:06,516 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=B 2024-11-20T13:25:06,516 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:06,516 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b01f50ebe5529b12faadacb91472f69, store=C 2024-11-20T13:25:06,516 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:06,523 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/f70ab575fb17442abcc411acf87ea845 is 50, key is test_row_0/A:col10/1732109097520/Put/seqid=0 2024-11-20T13:25:06,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742157_1333 (size=12301) 2024-11-20T13:25:06,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T13:25:06,929 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/f70ab575fb17442abcc411acf87ea845 2024-11-20T13:25:06,939 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/72f63f68879c48f09671d0c18c1c7a90 is 50, key is test_row_0/B:col10/1732109097520/Put/seqid=0 2024-11-20T13:25:06,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T13:25:06,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742158_1334 (size=12301) 2024-11-20T13:25:07,361 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 
{event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/72f63f68879c48f09671d0c18c1c7a90 2024-11-20T13:25:07,406 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/c9bd4aee7bf94b1baac207fd09608b3a is 50, key is test_row_0/C:col10/1732109097520/Put/seqid=0 2024-11-20T13:25:07,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742159_1335 (size=12301) 2024-11-20T13:25:07,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T13:25:07,829 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/c9bd4aee7bf94b1baac207fd09608b3a 2024-11-20T13:25:07,838 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/A/f70ab575fb17442abcc411acf87ea845 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/f70ab575fb17442abcc411acf87ea845 2024-11-20T13:25:07,843 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/f70ab575fb17442abcc411acf87ea845, entries=150, sequenceid=419, filesize=12.0 K 2024-11-20T13:25:07,845 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/B/72f63f68879c48f09671d0c18c1c7a90 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/72f63f68879c48f09671d0c18c1c7a90 2024-11-20T13:25:07,849 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/72f63f68879c48f09671d0c18c1c7a90, entries=150, sequenceid=419, filesize=12.0 K 2024-11-20T13:25:07,850 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/.tmp/C/c9bd4aee7bf94b1baac207fd09608b3a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c9bd4aee7bf94b1baac207fd09608b3a 2024-11-20T13:25:07,855 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c9bd4aee7bf94b1baac207fd09608b3a, entries=150, sequenceid=419, filesize=12.0 K 2024-11-20T13:25:07,856 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 7b01f50ebe5529b12faadacb91472f69 in 1341ms, sequenceid=419, compaction requested=true 2024-11-20T13:25:07,857 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/4cc9ebf4b16343408ab53f947961db1a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/ee9a09de85ce4dd896cfbd1eb806dcfd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/688963f8620349b9807cba11e35dc323, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/815eddaa082d4b0db9eac83dc77e70ca, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/684202332f6e4b949e0f2a3de52b74ec, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/2e70f96922e844169d09a5200acc22ce, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d3cb5e937f8e49028e80caa289e16012, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/4e76ff5db0a743bd911bcc10c5dd5ecb, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/1b5013f3a33a4bf4abbeb4f0d5f3d840, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d1bb7863d67f4e6c91b3e4e9c4eb354f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d31b26256f8b4a57a09be4f373c12d09, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/0dda916686864ceb8d2a44554fc7870d, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/ab6669459f9a478297044d1d74464ec4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/857cc02eb2ee4e4ab4c4af5f3d33476f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/b9194e444eac46f8ba2b9db79dc53c8f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/df4161c9d46c40779028f986f297ca0e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/ef3ae4cb3898428eaff2e35e72607184, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/411a7f067292426886148bbcf9e93692, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/44e8a51c93284f1dba2e6874fa1b39cc, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d44542d242f548a9abc9456e3b09a281, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/6ed8146ef20d43b3bfea4afbc0bc91e3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/72a6aca2f324409bb92d9bf338ff8561, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/cb20215100fc45d59ed664345e3fce51, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/a08dcc8b9be64194aae31f6ace286afe, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/7a3c3a8c90ed47fbbb4971dc8686a1e3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/164a759f33e74e55a852ac63d31c8149, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/de704c83e30c4f9cbfa494de04d6fc09, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/7859bc59633d428dbb0faeee0a4782f5] to archive 2024-11-20T13:25:07,867 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T13:25:07,870 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/4cc9ebf4b16343408ab53f947961db1a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/4cc9ebf4b16343408ab53f947961db1a 2024-11-20T13:25:07,871 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/ee9a09de85ce4dd896cfbd1eb806dcfd to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/ee9a09de85ce4dd896cfbd1eb806dcfd 2024-11-20T13:25:07,874 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/688963f8620349b9807cba11e35dc323 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/688963f8620349b9807cba11e35dc323 2024-11-20T13:25:07,875 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/815eddaa082d4b0db9eac83dc77e70ca to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/815eddaa082d4b0db9eac83dc77e70ca 2024-11-20T13:25:07,877 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/684202332f6e4b949e0f2a3de52b74ec to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/684202332f6e4b949e0f2a3de52b74ec 2024-11-20T13:25:07,878 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/2e70f96922e844169d09a5200acc22ce to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/2e70f96922e844169d09a5200acc22ce 2024-11-20T13:25:07,880 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d3cb5e937f8e49028e80caa289e16012 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d3cb5e937f8e49028e80caa289e16012 2024-11-20T13:25:07,882 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/4e76ff5db0a743bd911bcc10c5dd5ecb to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/4e76ff5db0a743bd911bcc10c5dd5ecb 2024-11-20T13:25:07,883 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/1b5013f3a33a4bf4abbeb4f0d5f3d840 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/1b5013f3a33a4bf4abbeb4f0d5f3d840 2024-11-20T13:25:07,885 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d1bb7863d67f4e6c91b3e4e9c4eb354f to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d1bb7863d67f4e6c91b3e4e9c4eb354f 2024-11-20T13:25:07,886 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d31b26256f8b4a57a09be4f373c12d09 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d31b26256f8b4a57a09be4f373c12d09 2024-11-20T13:25:07,887 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/0dda916686864ceb8d2a44554fc7870d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/0dda916686864ceb8d2a44554fc7870d 2024-11-20T13:25:07,892 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/ab6669459f9a478297044d1d74464ec4 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/ab6669459f9a478297044d1d74464ec4 2024-11-20T13:25:07,897 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/857cc02eb2ee4e4ab4c4af5f3d33476f to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/857cc02eb2ee4e4ab4c4af5f3d33476f 2024-11-20T13:25:07,899 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/b9194e444eac46f8ba2b9db79dc53c8f to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/b9194e444eac46f8ba2b9db79dc53c8f 2024-11-20T13:25:07,900 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/df4161c9d46c40779028f986f297ca0e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/df4161c9d46c40779028f986f297ca0e 2024-11-20T13:25:07,902 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/ef3ae4cb3898428eaff2e35e72607184 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/ef3ae4cb3898428eaff2e35e72607184 2024-11-20T13:25:07,903 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/411a7f067292426886148bbcf9e93692 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/411a7f067292426886148bbcf9e93692 2024-11-20T13:25:07,905 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/44e8a51c93284f1dba2e6874fa1b39cc to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/44e8a51c93284f1dba2e6874fa1b39cc 2024-11-20T13:25:07,906 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d44542d242f548a9abc9456e3b09a281 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/d44542d242f548a9abc9456e3b09a281 2024-11-20T13:25:07,907 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/6ed8146ef20d43b3bfea4afbc0bc91e3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/6ed8146ef20d43b3bfea4afbc0bc91e3 2024-11-20T13:25:07,909 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/72a6aca2f324409bb92d9bf338ff8561 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/72a6aca2f324409bb92d9bf338ff8561 2024-11-20T13:25:07,911 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/cb20215100fc45d59ed664345e3fce51 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/cb20215100fc45d59ed664345e3fce51 2024-11-20T13:25:07,913 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/a08dcc8b9be64194aae31f6ace286afe to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/a08dcc8b9be64194aae31f6ace286afe 2024-11-20T13:25:07,914 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/7a3c3a8c90ed47fbbb4971dc8686a1e3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/7a3c3a8c90ed47fbbb4971dc8686a1e3 2024-11-20T13:25:07,916 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/164a759f33e74e55a852ac63d31c8149 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/164a759f33e74e55a852ac63d31c8149 2024-11-20T13:25:07,917 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/de704c83e30c4f9cbfa494de04d6fc09 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/de704c83e30c4f9cbfa494de04d6fc09 2024-11-20T13:25:07,918 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/7859bc59633d428dbb0faeee0a4782f5 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/7859bc59633d428dbb0faeee0a4782f5 2024-11-20T13:25:07,928 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/273bc95931874a0faf53d985d7a2d4ad, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/46c903d43e4044278eee6147f126548f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/41c51fd4ab8941fe9ea2662b16129fa8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/1f8f692d57cd475bb3a6e270e5c519de, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/7927fececc03404284005093da614552, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/7a6bd02d17a84028a9ccada74e4386a0, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/b6d335ac8d934633ba9144a12911397d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/5030247d319f4f609668437636b31284, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/cba8491641df4c6ba24c44cdb4c70fb2, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e15d86e4e96c4a9b89f375fe13e978e4, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/d0470cf051944badb1e38f55bbf3a1e8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/37b9f4feb47a4fd5a68bf3e4cd7d6c9b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/1a3851dad2574233b06bf8efd0e0809f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e8bbd1b5a4da49a88bc9309b3366ca1a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/a9404fb9f3304b119fe5ee71c6b21a6e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e264b278673b4600bf2cf9ead77b09a4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e6cdd6097db14214ae3119ba377629f8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/f780ac07874f44b68ce3ea717e6dd486, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/3946f9ceec274b9aa1f43ca4caaa063d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/d6c9c41019de43ddb79e95c755fb9ec3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/f89e8a2942964fe3a3912bfa8d58607c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/a9b59dcc82f6485e88238ba31a2d6742, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/5d4691fbd9f047c3bad2fc3674d68211, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/64ac7ee437ab4a379093975396a6bad5, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/cfb9115579f94edb993ec6eeba275e23, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/d56ad203a0274d2299b61a34328321ff, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/088afca5c2b349969d0dba415ca4f566, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/524e889c353f4dfc8480f331aede30d8] to archive 2024-11-20T13:25:07,929 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T13:25:07,932 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/273bc95931874a0faf53d985d7a2d4ad to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/273bc95931874a0faf53d985d7a2d4ad 2024-11-20T13:25:07,934 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/46c903d43e4044278eee6147f126548f to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/46c903d43e4044278eee6147f126548f 2024-11-20T13:25:07,935 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/41c51fd4ab8941fe9ea2662b16129fa8 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/41c51fd4ab8941fe9ea2662b16129fa8 2024-11-20T13:25:07,938 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/1f8f692d57cd475bb3a6e270e5c519de to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/1f8f692d57cd475bb3a6e270e5c519de 2024-11-20T13:25:07,939 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/7927fececc03404284005093da614552 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/7927fececc03404284005093da614552 2024-11-20T13:25:07,941 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/7a6bd02d17a84028a9ccada74e4386a0 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/7a6bd02d17a84028a9ccada74e4386a0 2024-11-20T13:25:07,942 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/b6d335ac8d934633ba9144a12911397d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/b6d335ac8d934633ba9144a12911397d 2024-11-20T13:25:07,962 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/5030247d319f4f609668437636b31284 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/5030247d319f4f609668437636b31284 2024-11-20T13:25:07,984 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/cba8491641df4c6ba24c44cdb4c70fb2 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/cba8491641df4c6ba24c44cdb4c70fb2 2024-11-20T13:25:08,008 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e15d86e4e96c4a9b89f375fe13e978e4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e15d86e4e96c4a9b89f375fe13e978e4 2024-11-20T13:25:08,032 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/d0470cf051944badb1e38f55bbf3a1e8 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/d0470cf051944badb1e38f55bbf3a1e8 2024-11-20T13:25:08,052 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/37b9f4feb47a4fd5a68bf3e4cd7d6c9b to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/37b9f4feb47a4fd5a68bf3e4cd7d6c9b 2024-11-20T13:25:08,086 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/1a3851dad2574233b06bf8efd0e0809f to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/1a3851dad2574233b06bf8efd0e0809f 2024-11-20T13:25:08,105 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e8bbd1b5a4da49a88bc9309b3366ca1a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e8bbd1b5a4da49a88bc9309b3366ca1a 2024-11-20T13:25:08,107 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/a9404fb9f3304b119fe5ee71c6b21a6e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/a9404fb9f3304b119fe5ee71c6b21a6e 2024-11-20T13:25:08,108 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e264b278673b4600bf2cf9ead77b09a4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e264b278673b4600bf2cf9ead77b09a4 2024-11-20T13:25:08,110 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e6cdd6097db14214ae3119ba377629f8 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/e6cdd6097db14214ae3119ba377629f8 2024-11-20T13:25:08,111 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/f780ac07874f44b68ce3ea717e6dd486 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/f780ac07874f44b68ce3ea717e6dd486 2024-11-20T13:25:08,113 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/3946f9ceec274b9aa1f43ca4caaa063d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/3946f9ceec274b9aa1f43ca4caaa063d 2024-11-20T13:25:08,114 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/d6c9c41019de43ddb79e95c755fb9ec3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/d6c9c41019de43ddb79e95c755fb9ec3 2024-11-20T13:25:08,116 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/f89e8a2942964fe3a3912bfa8d58607c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/f89e8a2942964fe3a3912bfa8d58607c 2024-11-20T13:25:08,117 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/a9b59dcc82f6485e88238ba31a2d6742 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/a9b59dcc82f6485e88238ba31a2d6742 2024-11-20T13:25:08,118 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/5d4691fbd9f047c3bad2fc3674d68211 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/5d4691fbd9f047c3bad2fc3674d68211 2024-11-20T13:25:08,120 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/64ac7ee437ab4a379093975396a6bad5 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/64ac7ee437ab4a379093975396a6bad5 2024-11-20T13:25:08,121 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/cfb9115579f94edb993ec6eeba275e23 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/cfb9115579f94edb993ec6eeba275e23 2024-11-20T13:25:08,124 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/d56ad203a0274d2299b61a34328321ff to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/d56ad203a0274d2299b61a34328321ff 2024-11-20T13:25:08,125 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/088afca5c2b349969d0dba415ca4f566 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/088afca5c2b349969d0dba415ca4f566 2024-11-20T13:25:08,127 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/524e889c353f4dfc8480f331aede30d8 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/524e889c353f4dfc8480f331aede30d8 2024-11-20T13:25:08,148 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/bc3dc8dee7cd4faebd803b5d1527423d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/4bd8652be3eb49efa4b9dcbd59282026, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c4ab8547de8c4f949ae6f7011d9cfe94, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/9731861c6fd949eaa03adf04eab49353, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/d4f107a7b92446f9baac10acd25958ff, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/e503073b18904699a3f3c75bee4a0df3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/37b0b959e9004daebc064bea51e51749, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/46531333cf5041a59678c218b5ea0f18, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/7253720da36a41649c8651f8b6a0e481, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/7166be2fcb7a404b9000095f9a14b331, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/15488a48131c49529ba9b5e52bdf0d53, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c0bd2ee37b2a434989f1f4b19b116ad5, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/04bfbb12377744a8a7609cfdec0dd02b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/f1ee4fe86ef14fe396e85ec65c7bb2c1, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/e09bb48bba174575a98d0d89224b5ac2, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/33ae2690beab4ffe90868583e3b311e7, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/1e6e7f9e5d5c44fe86a70d1910fd0113, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/78773de2c9dd454d80caa3c62172107c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/60a6b32bd85d45d8b17d2f7df3f7e749, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/3bdb90dd3d3e46799ab7b4d1f133cccd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/77442a235cf548fb9cf240a091f8935c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/91e2b692a9a347d882332ccdea270619, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/fb8c1da388924f30a83ae4dce611b0d4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c9bdf94dbf0542cd8aa7eb5a1018665d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/d245d2a9414b48968daa7d663be9b17d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/daf99cd702de41bdab294f86e975f899, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/e9a519e039fc447080189d746b51a031, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/39f2f6eae5324e39922d1d6188572bf5] to archive 2024-11-20T13:25:08,150 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T13:25:08,154 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/bc3dc8dee7cd4faebd803b5d1527423d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/bc3dc8dee7cd4faebd803b5d1527423d 2024-11-20T13:25:08,156 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/4bd8652be3eb49efa4b9dcbd59282026 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/4bd8652be3eb49efa4b9dcbd59282026 2024-11-20T13:25:08,157 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c4ab8547de8c4f949ae6f7011d9cfe94 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c4ab8547de8c4f949ae6f7011d9cfe94 2024-11-20T13:25:08,159 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/9731861c6fd949eaa03adf04eab49353 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/9731861c6fd949eaa03adf04eab49353 2024-11-20T13:25:08,160 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/d4f107a7b92446f9baac10acd25958ff to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/d4f107a7b92446f9baac10acd25958ff 2024-11-20T13:25:08,162 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/e503073b18904699a3f3c75bee4a0df3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/e503073b18904699a3f3c75bee4a0df3 2024-11-20T13:25:08,164 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/37b0b959e9004daebc064bea51e51749 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/37b0b959e9004daebc064bea51e51749 2024-11-20T13:25:08,172 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/46531333cf5041a59678c218b5ea0f18 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/46531333cf5041a59678c218b5ea0f18 2024-11-20T13:25:08,174 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/7253720da36a41649c8651f8b6a0e481 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/7253720da36a41649c8651f8b6a0e481 2024-11-20T13:25:08,175 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/7166be2fcb7a404b9000095f9a14b331 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/7166be2fcb7a404b9000095f9a14b331 2024-11-20T13:25:08,177 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/15488a48131c49529ba9b5e52bdf0d53 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/15488a48131c49529ba9b5e52bdf0d53 2024-11-20T13:25:08,183 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c0bd2ee37b2a434989f1f4b19b116ad5 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c0bd2ee37b2a434989f1f4b19b116ad5 2024-11-20T13:25:08,193 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/04bfbb12377744a8a7609cfdec0dd02b to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/04bfbb12377744a8a7609cfdec0dd02b 2024-11-20T13:25:08,195 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/f1ee4fe86ef14fe396e85ec65c7bb2c1 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/f1ee4fe86ef14fe396e85ec65c7bb2c1 2024-11-20T13:25:08,197 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/e09bb48bba174575a98d0d89224b5ac2 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/e09bb48bba174575a98d0d89224b5ac2 2024-11-20T13:25:08,198 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/33ae2690beab4ffe90868583e3b311e7 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/33ae2690beab4ffe90868583e3b311e7 2024-11-20T13:25:08,210 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/1e6e7f9e5d5c44fe86a70d1910fd0113 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/1e6e7f9e5d5c44fe86a70d1910fd0113 2024-11-20T13:25:08,212 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/78773de2c9dd454d80caa3c62172107c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/78773de2c9dd454d80caa3c62172107c 2024-11-20T13:25:08,214 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/60a6b32bd85d45d8b17d2f7df3f7e749 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/60a6b32bd85d45d8b17d2f7df3f7e749 2024-11-20T13:25:08,215 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/3bdb90dd3d3e46799ab7b4d1f133cccd to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/3bdb90dd3d3e46799ab7b4d1f133cccd 2024-11-20T13:25:08,217 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/77442a235cf548fb9cf240a091f8935c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/77442a235cf548fb9cf240a091f8935c 2024-11-20T13:25:08,218 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/91e2b692a9a347d882332ccdea270619 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/91e2b692a9a347d882332ccdea270619 2024-11-20T13:25:08,219 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/fb8c1da388924f30a83ae4dce611b0d4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/fb8c1da388924f30a83ae4dce611b0d4 2024-11-20T13:25:08,221 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c9bdf94dbf0542cd8aa7eb5a1018665d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c9bdf94dbf0542cd8aa7eb5a1018665d 2024-11-20T13:25:08,222 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/d245d2a9414b48968daa7d663be9b17d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/d245d2a9414b48968daa7d663be9b17d 2024-11-20T13:25:08,224 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/daf99cd702de41bdab294f86e975f899 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/daf99cd702de41bdab294f86e975f899 2024-11-20T13:25:08,225 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/e9a519e039fc447080189d746b51a031 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/e9a519e039fc447080189d746b51a031 2024-11-20T13:25:08,227 DEBUG [StoreCloser-TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/39f2f6eae5324e39922d1d6188572bf5 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/39f2f6eae5324e39922d1d6188572bf5 2024-11-20T13:25:08,239 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/recovered.edits/422.seqid, newMaxSeqId=422, maxSeqId=1 2024-11-20T13:25:08,240 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69. 
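Every HFileArchiver(596) entry above records the same operation: a compacted store file is moved from the region's data directory to the mirrored path under the cluster's archive directory before the store is closed. A minimal sketch of that move using only the Hadoop FileSystem API (illustrative and assumed, not the actual org.apache.hadoop.hbase.backup.HFileArchiver implementation; the helper name and path handling are assumptions):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {
    // Move one store file from <root>/data/... to <root>/archive/data/...,
    // creating the destination directory first. Illustrative only: the real
    // HFileArchiver also handles name collisions, retries and cleanup.
    static boolean archiveStoreFile(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
        String relative = storeFile.toUri().getPath()
                .substring(rootDir.toUri().getPath().length() + 1); // e.g. data/default/<table>/<region>/<family>/<hfile>
        Path archived = new Path(new Path(rootDir, "archive"), relative);
        fs.mkdirs(archived.getParent());        // ensure archive/<...>/<family> exists
        return fs.rename(storeFile, archived);  // a metadata-only namespace move on HDFS
    }
}

Because an HDFS rename touches only NameNode metadata, it is cheap regardless of file size, which is consistent with the log above showing dozens of store files archived a millisecond or two apart.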
2024-11-20T13:25:08,240 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1635): Region close journal for 7b01f50ebe5529b12faadacb91472f69: 2024-11-20T13:25:08,246 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(170): Closed 7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:25:08,252 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=7b01f50ebe5529b12faadacb91472f69, regionState=CLOSED 2024-11-20T13:25:08,257 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-20T13:25:08,257 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseRegionProcedure 7b01f50ebe5529b12faadacb91472f69, server=5ef453f0fbb6,46739,1732109006137 in 1.8910 sec 2024-11-20T13:25:08,259 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-11-20T13:25:08,259 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b01f50ebe5529b12faadacb91472f69, UNASSIGN in 1.8980 sec 2024-11-20T13:25:08,264 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-20T13:25:08,265 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9020 sec 2024-11-20T13:25:08,266 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109108266"}]},"ts":"1732109108266"} 2024-11-20T13:25:08,267 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T13:25:08,275 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T13:25:08,277 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9270 sec 2024-11-20T13:25:08,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T13:25:08,458 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-11-20T13:25:08,459 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T13:25:08,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:25:08,461 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:25:08,461 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:25:08,461 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-20T13:25:08,479 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:25:08,484 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A, FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B, FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C, FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/recovered.edits] 2024-11-20T13:25:08,488 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/5759f1281898463ab03eeb3106047143 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/5759f1281898463ab03eeb3106047143 2024-11-20T13:25:08,490 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/f09858aa7ac64444bb65f4c221ffd33a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/f09858aa7ac64444bb65f4c221ffd33a 2024-11-20T13:25:08,491 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/f70ab575fb17442abcc411acf87ea845 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/A/f70ab575fb17442abcc411acf87ea845 2024-11-20T13:25:08,495 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/6e27aa73aee44edd8f85ee44a33b56d9 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/6e27aa73aee44edd8f85ee44a33b56d9 2024-11-20T13:25:08,496 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/72f63f68879c48f09671d0c18c1c7a90 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/72f63f68879c48f09671d0c18c1c7a90 
2024-11-20T13:25:08,498 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/ebc873bd9283445e82315215d05248e4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/B/ebc873bd9283445e82315215d05248e4 2024-11-20T13:25:08,502 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/24b46217a8fd4c1790ab8c6d31d7ec2d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/24b46217a8fd4c1790ab8c6d31d7ec2d 2024-11-20T13:25:08,503 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c9467a101b56493b98b67f7d6b5924e5 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c9467a101b56493b98b67f7d6b5924e5 2024-11-20T13:25:08,505 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c9bd4aee7bf94b1baac207fd09608b3a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/C/c9bd4aee7bf94b1baac207fd09608b3a 2024-11-20T13:25:08,508 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/recovered.edits/422.seqid to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69/recovered.edits/422.seqid 2024-11-20T13:25:08,509 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69 2024-11-20T13:25:08,509 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T13:25:08,512 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:25:08,516 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T13:25:08,524 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
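The HFileArchiver entries above show the DeleteTableProcedure moving every store file of region 7b01f50ebe5529b12faadacb91472f69 (families A, B, C plus recovered.edits) from the table's data directory to the mirrored path under archive/ before deleting the region directory. As a minimal, hypothetical sketch (the namenode URI, root path, and region name are copied from the log; the listing helper itself is illustrative and not part of the test), the resulting archive layout can be inspected with the Hadoop FileSystem API:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedStoreFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Values below are taken from the log; adjust for a real cluster.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40089"), conf);
    Path archivedRegion = new Path(
        "/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc"
        + "/archive/data/default/TestAcidGuarantees/7b01f50ebe5529b12faadacb91472f69");
    // The archive mirrors the data layout: one subdirectory per column family
    // (A, B, C) plus recovered.edits, each holding the HFiles moved out of data/.
    for (FileStatus family : fs.listStatus(archivedRegion)) {
      for (FileStatus file : fs.listStatus(family.getPath())) {
        System.out.println(file.getPath());
      }
    }
  }
}
```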
2024-11-20T13:25:08,525 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:25:08,525 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T13:25:08,525 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732109108525"}]},"ts":"9223372036854775807"} 2024-11-20T13:25:08,527 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T13:25:08,528 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 7b01f50ebe5529b12faadacb91472f69, NAME => 'TestAcidGuarantees,,1732109074710.7b01f50ebe5529b12faadacb91472f69.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T13:25:08,528 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T13:25:08,528 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732109108528"}]},"ts":"9223372036854775807"} 2024-11-20T13:25:08,533 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T13:25:08,536 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:25:08,537 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 77 msec 2024-11-20T13:25:08,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-20T13:25:08,563 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 95 completed 2024-11-20T13:25:08,576 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=237 (was 237), OpenFileDescriptor=446 (was 451), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1193 (was 1066) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1148 (was 542) - AvailableMemoryMB LEAK? - 2024-11-20T13:25:08,588 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=237, OpenFileDescriptor=446, MaxFileDescriptor=1048576, SystemLoadAverage=1193, ProcessCount=11, AvailableMemoryMB=1148 2024-11-20T13:25:08,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
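At this point DisableTableProcedure (pid=91) and DeleteTableProcedure (pid=95) have both finished, and the client-side HBaseAdmin$TableFuture entries report DISABLE and DELETE of default:TestAcidGuarantees as completed before the next test method begins. A minimal sketch of the corresponding client calls, assuming a standard HBase 2.x connection (the connection setup here is illustrative; the test harness manages its own):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);   // drives a DisableTableProcedure (pid=91 above)
        }
        admin.deleteTable(table);      // drives a DeleteTableProcedure (pid=95 above)
      }
    }
  }
}
```

Both calls block until the master reports the procedure as finished, which is what the repeated "Checking to see if procedure is done" entries on the RPC handler reflect.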
2024-11-20T13:25:08,590 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T13:25:08,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T13:25:08,593 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T13:25:08,593 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:08,593 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 96 2024-11-20T13:25:08,594 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T13:25:08,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T13:25:08,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742160_1336 (size=963) 2024-11-20T13:25:08,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T13:25:08,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T13:25:09,004 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc 2024-11-20T13:25:09,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742161_1337 (size=53) 2024-11-20T13:25:09,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T13:25:09,415 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:25:09,415 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 33af58af6776fca1f72685bf60c347d3, disabling compactions & flushes 2024-11-20T13:25:09,415 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:09,415 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:09,415 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. after waiting 0 ms 2024-11-20T13:25:09,415 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:09,415 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
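The CREATE request logged at 13:25:08,590 builds TestAcidGuarantees with the table-level 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' metadata attribute and three identically configured families A, B and C (VERSIONS=1, ROW bloom filter, 64 KB blocks, no compression). A rough client-side equivalent, assuming the HBase 2.x descriptor builders; only a subset of the attributes shown in the log is spelled out here:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateTestAcidGuaranteesTable {
  public static void createTestTable(Admin admin) throws java.io.IOException {
    TableDescriptorBuilder table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // Table-level metadata attribute shown in the create request above.
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String family : new String[] { "A", "B", "C" }) {
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)                 // VERSIONS => '1'
          .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
          .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536'
          .build();
      table.setColumnFamily(cf);
    }
    admin.createTable(table.build());
  }
}
```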
2024-11-20T13:25:09,415 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:09,417 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T13:25:09,417 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732109109417"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732109109417"}]},"ts":"1732109109417"} 2024-11-20T13:25:09,418 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T13:25:09,419 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T13:25:09,419 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109109419"}]},"ts":"1732109109419"} 2024-11-20T13:25:09,421 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T13:25:09,642 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=33af58af6776fca1f72685bf60c347d3, ASSIGN}] 2024-11-20T13:25:09,644 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=33af58af6776fca1f72685bf60c347d3, ASSIGN 2024-11-20T13:25:09,645 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=33af58af6776fca1f72685bf60c347d3, ASSIGN; state=OFFLINE, location=5ef453f0fbb6,46739,1732109006137; forceNewPlan=false, retain=false 2024-11-20T13:25:09,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T13:25:09,796 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=33af58af6776fca1f72685bf60c347d3, regionState=OPENING, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:09,797 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE; OpenRegionProcedure 33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137}] 2024-11-20T13:25:09,949 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:09,952 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
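While the master works through CreateTableProcedure pid=96, the RPC handler keeps logging "Checking to see if procedure is done pid=96"; this is the master-side trace of the client's HBaseAdmin future polling until the procedure reaches SUCCESS. A small sketch of the non-blocking variant of that wait, assuming the Admin.createTableAsync API (the five-minute timeout is arbitrary):

```java
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public final class AsyncCreateExample {
  // Submits the create and waits up to five minutes for the master-side
  // procedure (the pid=96 equivalent) to finish.
  public static void createAndWait(Admin admin, TableDescriptor desc) throws Exception {
    Future<Void> pending = admin.createTableAsync(desc);
    pending.get(5, TimeUnit.MINUTES);
  }
}
```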
2024-11-20T13:25:09,952 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7285): Opening region: {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} 2024-11-20T13:25:09,952 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:09,952 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:25:09,953 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7327): checking encryption for 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:09,953 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7330): checking classloading for 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:09,954 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:09,964 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:25:09,965 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 33af58af6776fca1f72685bf60c347d3 columnFamilyName A 2024-11-20T13:25:09,965 DEBUG [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:09,965 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] regionserver.HStore(327): Store=33af58af6776fca1f72685bf60c347d3/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:25:09,965 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:09,966 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:25:09,967 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 33af58af6776fca1f72685bf60c347d3 columnFamilyName B 2024-11-20T13:25:09,967 DEBUG [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:09,967 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] regionserver.HStore(327): Store=33af58af6776fca1f72685bf60c347d3/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:25:09,967 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:09,968 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:25:09,969 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 33af58af6776fca1f72685bf60c347d3 columnFamilyName C 2024-11-20T13:25:09,969 DEBUG [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:09,969 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] regionserver.HStore(327): Store=33af58af6776fca1f72685bf60c347d3/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:25:09,969 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:09,970 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:09,970 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:09,972 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T13:25:09,973 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1085): writing seq id for 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:09,975 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T13:25:09,976 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1102): Opened 33af58af6776fca1f72685bf60c347d3; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59399090, jitterRate=-0.11488458514213562}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T13:25:09,977 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1001): Region open journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:09,977 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., pid=98, masterSystemTime=1732109109948 2024-11-20T13:25:09,980 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=33af58af6776fca1f72685bf60c347d3, regionState=OPEN, openSeqNum=2, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:09,980 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:09,980 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
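On open, each of the three stores is backed by a CompactingMemStore with compactor=ADAPTIVE and a 2.00 MB in-memory flush threshold, because the table carries the ADAPTIVE compacting-memstore attribute set at create time. The same policy can also be requested per column family instead of at the table level; a hedged sketch, assuming the MemoryCompactionPolicy enum and the per-family builder setter:

```java
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class AdaptiveMemStoreFamily {
  // Per-family way to get a CompactingMemStore with the ADAPTIVE in-memory
  // compaction policy, as an alternative to the table-level metadata attribute.
  public static ColumnFamilyDescriptor adaptiveFamily(String name) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .build();
  }
}
```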
2024-11-20T13:25:09,983 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-11-20T13:25:09,983 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; OpenRegionProcedure 33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 in 184 msec 2024-11-20T13:25:09,984 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-11-20T13:25:09,984 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=33af58af6776fca1f72685bf60c347d3, ASSIGN in 341 msec 2024-11-20T13:25:09,985 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T13:25:09,985 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109109985"}]},"ts":"1732109109985"} 2024-11-20T13:25:09,986 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T13:25:10,088 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T13:25:10,090 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.4980 sec 2024-11-20T13:25:10,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T13:25:10,705 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 96 completed 2024-11-20T13:25:10,707 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6a3c6746 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@32d439fa 2024-11-20T13:25:10,818 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73d4d36d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:10,820 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:25:10,821 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52982, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:10,825 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T13:25:10,826 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44244, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T13:25:10,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table 
descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T13:25:10,838 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T13:25:10,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T13:25:10,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742162_1338 (size=999) 2024-11-20T13:25:10,861 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-20T13:25:10,861 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated tableinfo=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-20T13:25:10,864 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, 
state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T13:25:10,867 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=33af58af6776fca1f72685bf60c347d3, REOPEN/MOVE}] 2024-11-20T13:25:10,868 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=33af58af6776fca1f72685bf60c347d3, REOPEN/MOVE 2024-11-20T13:25:10,869 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=33af58af6776fca1f72685bf60c347d3, regionState=CLOSING, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:10,870 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T13:25:10,870 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE; CloseRegionProcedure 33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137}] 2024-11-20T13:25:11,022 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:11,022 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(124): Close 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:11,022 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T13:25:11,022 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1681): Closing 33af58af6776fca1f72685bf60c347d3, disabling compactions & flushes 2024-11-20T13:25:11,022 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:11,022 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:11,022 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. after waiting 0 ms 2024-11-20T13:25:11,022 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
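The modify issued at 13:25:10,838 changes family A to IS_MOB => 'true' with MOB_THRESHOLD => '4' (cell values larger than 4 bytes are written to MOB files), and ModifyTableProcedure pid=99 then drives the ReopenTableRegionsProcedure and the close/reopen cycle of region 33af58af6776fca1f72685bf60c347d3 logged around it. A minimal client-side sketch of that alteration, assuming an existing Admin handle:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class EnableMobOnFamilyA {
  public static void enableMob(Admin admin) throws java.io.IOException {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(table);
    // Rebuild family A with MOB enabled and the 4-byte threshold from the log.
    ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder
        .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
        .setMobEnabled(true)
        .setMobThreshold(4L)
        .build();
    TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
        .modifyColumnFamily(mobA)
        .build();
    // Drives a ModifyTableProcedure (pid=99) and the region reopen seen above.
    admin.modifyTable(updated);
  }
}
```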
2024-11-20T13:25:11,027 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T13:25:11,028 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:11,028 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1635): Region close journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:11,028 WARN [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegionServer(3786): Not adding moved region record: 33af58af6776fca1f72685bf60c347d3 to self. 2024-11-20T13:25:11,030 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(170): Closed 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:11,030 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=33af58af6776fca1f72685bf60c347d3, regionState=CLOSED 2024-11-20T13:25:11,033 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-11-20T13:25:11,033 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; CloseRegionProcedure 33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 in 162 msec 2024-11-20T13:25:11,035 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=33af58af6776fca1f72685bf60c347d3, REOPEN/MOVE; state=CLOSED, location=5ef453f0fbb6,46739,1732109006137; forceNewPlan=false, retain=true 2024-11-20T13:25:11,186 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=33af58af6776fca1f72685bf60c347d3, regionState=OPENING, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:11,188 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=101, state=RUNNABLE; OpenRegionProcedure 33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137}] 2024-11-20T13:25:11,339 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:11,343 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:11,343 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7285): Opening region: {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} 2024-11-20T13:25:11,344 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:11,344 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:25:11,344 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7327): checking encryption for 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:11,344 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7330): checking classloading for 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:11,345 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:11,347 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:25:11,347 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 33af58af6776fca1f72685bf60c347d3 columnFamilyName A 2024-11-20T13:25:11,348 DEBUG [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:11,349 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] regionserver.HStore(327): Store=33af58af6776fca1f72685bf60c347d3/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:25:11,349 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:11,350 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:25:11,350 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 33af58af6776fca1f72685bf60c347d3 columnFamilyName B 2024-11-20T13:25:11,351 DEBUG [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:11,351 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] regionserver.HStore(327): Store=33af58af6776fca1f72685bf60c347d3/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:25:11,351 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:11,352 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:25:11,352 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 33af58af6776fca1f72685bf60c347d3 columnFamilyName C 2024-11-20T13:25:11,352 DEBUG [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:11,353 INFO [StoreOpener-33af58af6776fca1f72685bf60c347d3-1 {}] regionserver.HStore(327): Store=33af58af6776fca1f72685bf60c347d3/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:25:11,353 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:11,356 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:11,357 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:11,359 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T13:25:11,361 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1085): writing seq id for 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:11,363 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1102): Opened 33af58af6776fca1f72685bf60c347d3; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71705637, jitterRate=0.06849725544452667}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T13:25:11,364 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1001): Region open journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:11,365 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., pid=103, masterSystemTime=1732109111339 2024-11-20T13:25:11,367 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:11,367 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
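Once the reopen completes, the Time-limited test thread opens a run of ReadOnlyZKClient sessions and RPC client instances (0x6a3c6746 earlier, then 0x50206885, 0x659f4c7c and the rest directly below), which appear to correspond to one extra HBase Connection per concurrent worker the test starts for testMobScanAtomicity; the ZooKeeper session plus KeyValueCodec line is normal per-connection setup. A hedged sketch of what each such connection amounts to on the client side (the row, family, and per-call connection pattern are illustrative, not taken from the log):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class PerWorkerConnection {
  // Each call opens its own Connection, which is what produces one
  // "ReadOnlyZKClient Connect 0x..." plus one RPC codec line in the log.
  public static void readOnce(byte[] row) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      table.get(new Get(row).addFamily(Bytes.toBytes("A")));
    }
  }
}
```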
2024-11-20T13:25:11,367 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=33af58af6776fca1f72685bf60c347d3, regionState=OPEN, openSeqNum=5, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:11,370 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=101 2024-11-20T13:25:11,370 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=101, state=SUCCESS; OpenRegionProcedure 33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 in 181 msec 2024-11-20T13:25:11,371 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-11-20T13:25:11,371 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=33af58af6776fca1f72685bf60c347d3, REOPEN/MOVE in 503 msec 2024-11-20T13:25:11,373 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-11-20T13:25:11,373 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 508 msec 2024-11-20T13:25:11,375 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 536 msec 2024-11-20T13:25:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-20T13:25:11,378 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x50206885 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@43288092 2024-11-20T13:25:11,556 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16326c21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:11,558 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x659f4c7c to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62347c4d 2024-11-20T13:25:11,649 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6762574e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:11,650 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d2f5cd9 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4cb88f25 2024-11-20T13:25:11,741 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@399569db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:11,743 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3ce70a91 to 
127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@342e7b8f 2024-11-20T13:25:11,864 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26670760, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:11,865 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x23fd6c87 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@64c89974 2024-11-20T13:25:11,949 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a8ade8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:11,950 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x670c8f03 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4bde73e6 2024-11-20T13:25:12,057 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37d041d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:12,058 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0edf02c6 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@36a611b8 2024-11-20T13:25:12,149 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cd0066, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:12,150 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f9705e1 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@393ae87a 2024-11-20T13:25:12,248 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@529f110b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:12,250 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5bbfc363 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@35289956 2024-11-20T13:25:12,348 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f95c1b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:12,350 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x65a3d8e5 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14301f2e 2024-11-20T13:25:12,426 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2599cc50, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:12,428 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:25:12,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-11-20T13:25:12,431 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:25:12,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T13:25:12,431 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:25:12,432 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:25:12,432 DEBUG [hconnection-0x70fddd2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:25:12,433 DEBUG [hconnection-0x780b56d8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:25:12,434 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52988, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:12,435 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52992, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:12,436 DEBUG [hconnection-0x6aee208e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:25:12,436 DEBUG [hconnection-0x7f12bf78-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:25:12,437 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52994, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:12,437 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53000, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:12,473 DEBUG [hconnection-0x309d2101-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-11-20T13:25:12,474 DEBUG [hconnection-0xa46ce5a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:25:12,476 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53016, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:12,481 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53028, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:12,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:12,486 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T13:25:12,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:12,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:12,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:12,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:12,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:12,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:12,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:12,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109172513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:12,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:12,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109172515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:12,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:12,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109172515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:12,520 DEBUG [hconnection-0x49347c6a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:25:12,522 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53036, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:12,522 DEBUG [hconnection-0x23880b4c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:25:12,524 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:12,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109172524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:12,525 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53040, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:12,526 DEBUG [hconnection-0x2073d035-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:25:12,527 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53044, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:12,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T13:25:12,560 DEBUG [hconnection-0x54e61ba6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:25:12,562 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53046, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:12,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:12,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53046 deadline: 1732109172563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:12,586 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:12,587 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T13:25:12,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:12,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:12,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:12,587 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
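The pid=105 errors above are the expected interplay of two flush paths: the MemStoreFlusher already began flushing 33af58af6776fca1f72685bf60c347d3 at 13:25:12,486, so when the master's FlushTableProcedure dispatches FlushRegionCallable, the region server answers "NOT flushing ... as already flushing", reports the IOException back, and the master keeps redispatching pid=105 until the in-flight flush finishes. The flush itself entered through the master RPC ("flush TestAcidGuarantees") at 13:25:12,428. A minimal sketch of issuing the same table flush through the public client API (the quorum address and class name are illustrative, not taken from this test) could look like:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed quorum address for illustration; the test's mini-cluster ZK listens on 127.0.0.1:53074.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits a flush-table procedure on the master, like the 'flush TestAcidGuarantees' call logged above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }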
2024-11-20T13:25:12,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:12,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:12,612 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120137baece001049858208bcb70a66de92_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109112484/Put/seqid=0 2024-11-20T13:25:12,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742163_1339 (size=12154) 2024-11-20T13:25:12,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:12,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109172620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:12,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:12,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109172621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:12,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:12,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109172621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:12,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:12,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109172633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:12,672 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:12,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53046 deadline: 1732109172667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:12,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T13:25:12,748 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:12,748 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T13:25:12,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:12,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:12,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:12,749 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
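Each RegionTooBusyException above is raised by HRegion.checkResources: once a region's memstore exceeds its blocking threshold, new mutations are rejected until the flush catches up. In stock HBase that threshold is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier (defaults 128 MB and 4); the 512.0 K limit reported here suggests the test deliberately shrinks the flush size so writers hit the ceiling quickly. An illustrative sketch of the two knobs involved, shown with the stock defaults rather than the test's actual settings:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region flush trigger (stock default 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Writes block with RegionTooBusyException once the memstore reaches
        // flush.size * block.multiplier -- the "Over memstore limit" ceiling logged above.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("blocking memstore limit = " + blocking + " bytes");
      }
    }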
2024-11-20T13:25:12,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:12,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:12,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:12,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109172832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:12,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:12,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109172834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:12,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:12,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109172834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:12,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:12,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109172840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:12,882 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:12,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53046 deadline: 1732109172875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:12,904 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:12,904 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T13:25:12,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:12,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:12,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:12,905 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
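On the client side, RegionTooBusyException is retryable: the RPC layer backs off (governed by hbase.client.pause) and re-submits the mutation up to hbase.client.retries.number times or until the operation deadline printed in the CallRunner lines (e.g. deadline: 1732109172875), which is why the same connections keep reappearing with increasing callIds. A minimal writer sketch against this table; the row and value contents are invented for illustration, while family 'A' and qualifier 'col10' mirror the keys seen in the flush output above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 30); // operation-level retry budget (illustrative value)
        conf.setLong("hbase.client.pause", 100);        // base backoff in ms between retries
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // A RegionTooBusyException thrown by the server is retried internally until the
          // write succeeds or the retry budget / operation timeout is exhausted.
          table.put(put);
        }
      }
    }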
2024-11-20T13:25:12,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:12,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:13,019 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:13,024 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120137baece001049858208bcb70a66de92_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120137baece001049858208bcb70a66de92_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:13,028 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/cef4fb1cbc354a40a074f3530601af57, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:13,028 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/cef4fb1cbc354a40a074f3530601af57 is 175, key is test_row_0/A:col10/1732109112484/Put/seqid=0 2024-11-20T13:25:13,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T13:25:13,059 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:13,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T13:25:13,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:13,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:13,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:13,060 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:13,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:13,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742164_1340 (size=30955) 2024-11-20T13:25:13,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:13,064 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/cef4fb1cbc354a40a074f3530601af57 2024-11-20T13:25:13,109 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/0ce0a57b1a0c421cbb350c99287fa3c7 is 50, key is test_row_0/B:col10/1732109112484/Put/seqid=0 2024-11-20T13:25:13,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:13,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109173140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:13,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742165_1341 (size=12001) 2024-11-20T13:25:13,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:13,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109173142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:13,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:13,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109173144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:13,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:13,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109173144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:13,152 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/0ce0a57b1a0c421cbb350c99287fa3c7 2024-11-20T13:25:13,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:13,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53046 deadline: 1732109173185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:13,215 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:13,215 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T13:25:13,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:13,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:13,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:13,218 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:13,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:13,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:13,229 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/f43f5c77728b449bb95c167023781582 is 50, key is test_row_0/C:col10/1732109112484/Put/seqid=0 2024-11-20T13:25:13,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742166_1342 (size=12001) 2024-11-20T13:25:13,269 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/f43f5c77728b449bb95c167023781582 2024-11-20T13:25:13,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/cef4fb1cbc354a40a074f3530601af57 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/cef4fb1cbc354a40a074f3530601af57 2024-11-20T13:25:13,341 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/cef4fb1cbc354a40a074f3530601af57, entries=150, sequenceid=15, filesize=30.2 K 2024-11-20T13:25:13,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/0ce0a57b1a0c421cbb350c99287fa3c7 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/0ce0a57b1a0c421cbb350c99287fa3c7 2024-11-20T13:25:13,361 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/0ce0a57b1a0c421cbb350c99287fa3c7, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T13:25:13,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/f43f5c77728b449bb95c167023781582 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/f43f5c77728b449bb95c167023781582 2024-11-20T13:25:13,372 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:13,374 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T13:25:13,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:13,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:13,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:13,375 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:13,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:13,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:13,389 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/f43f5c77728b449bb95c167023781582, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T13:25:13,390 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 33af58af6776fca1f72685bf60c347d3 in 904ms, sequenceid=15, compaction requested=false 2024-11-20T13:25:13,390 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:13,532 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:13,533 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T13:25:13,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:13,535 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T13:25:13,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:13,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:13,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:13,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:13,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:13,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:13,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T13:25:13,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f8b79872a3414ef0ada3e42706ab2077_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109112507/Put/seqid=0 2024-11-20T13:25:13,587 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742167_1343 (size=12154) 2024-11-20T13:25:13,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:13,631 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f8b79872a3414ef0ada3e42706ab2077_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f8b79872a3414ef0ada3e42706ab2077_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:13,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/75b649cd5a4a4fa7b9df0cf0abf9a8fd, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:13,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/75b649cd5a4a4fa7b9df0cf0abf9a8fd is 175, key is test_row_0/A:col10/1732109112507/Put/seqid=0 2024-11-20T13:25:13,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:13,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:13,690 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:13,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:13,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109173676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:13,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109173676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:13,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742168_1344 (size=30955) 2024-11-20T13:25:13,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:13,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109173690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:13,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:13,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109173691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:13,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:13,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53046 deadline: 1732109173702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:13,790 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T13:25:13,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:13,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109173793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:13,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:13,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109173797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:13,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:13,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109173811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:13,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:13,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109173824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:14,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:14,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109174007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:14,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:14,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109174020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:14,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:14,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109174020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:14,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:14,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109174041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:14,124 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/75b649cd5a4a4fa7b9df0cf0abf9a8fd 2024-11-20T13:25:14,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/64291bd35cd94996828c2a538a45a778 is 50, key is test_row_0/B:col10/1732109112507/Put/seqid=0 2024-11-20T13:25:14,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742169_1345 (size=12001) 2024-11-20T13:25:14,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:14,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109174317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:14,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:14,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109174338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:14,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:14,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109174340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:14,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:14,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109174352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:14,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T13:25:14,651 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/64291bd35cd94996828c2a538a45a778 2024-11-20T13:25:14,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/0f514d7af1e647d0aa79f610d95aaa50 is 50, key is test_row_0/C:col10/1732109112507/Put/seqid=0 2024-11-20T13:25:14,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742170_1346 (size=12001) 2024-11-20T13:25:14,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:14,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53046 deadline: 1732109174734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:14,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:14,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109174828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:14,859 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:14,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109174852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:14,859 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:14,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109174852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:14,869 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:14,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109174864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:15,124 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/0f514d7af1e647d0aa79f610d95aaa50 2024-11-20T13:25:15,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/75b649cd5a4a4fa7b9df0cf0abf9a8fd as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/75b649cd5a4a4fa7b9df0cf0abf9a8fd 2024-11-20T13:25:15,141 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/75b649cd5a4a4fa7b9df0cf0abf9a8fd, entries=150, sequenceid=40, filesize=30.2 K 2024-11-20T13:25:15,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/64291bd35cd94996828c2a538a45a778 as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/64291bd35cd94996828c2a538a45a778 2024-11-20T13:25:15,146 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/64291bd35cd94996828c2a538a45a778, entries=150, sequenceid=40, filesize=11.7 K 2024-11-20T13:25:15,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/0f514d7af1e647d0aa79f610d95aaa50 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/0f514d7af1e647d0aa79f610d95aaa50 2024-11-20T13:25:15,175 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/0f514d7af1e647d0aa79f610d95aaa50, entries=150, sequenceid=40, filesize=11.7 K 2024-11-20T13:25:15,188 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 33af58af6776fca1f72685bf60c347d3 in 1653ms, sequenceid=40, compaction requested=false 2024-11-20T13:25:15,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:15,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:15,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-11-20T13:25:15,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-11-20T13:25:15,201 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-11-20T13:25:15,201 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7650 sec 2024-11-20T13:25:15,203 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 2.7740 sec 2024-11-20T13:25:15,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:15,870 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T13:25:15,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:15,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:15,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:15,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:15,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:15,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:15,888 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204ed6d1697fd2419192f38d75a2f9db5d_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109113679/Put/seqid=0 2024-11-20T13:25:15,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742171_1347 (size=14594) 2024-11-20T13:25:15,916 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:15,953 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204ed6d1697fd2419192f38d75a2f9db5d_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204ed6d1697fd2419192f38d75a2f9db5d_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:15,954 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/545a742a6d8c4d28a6e44499e7ea4a00, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:15,955 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/545a742a6d8c4d28a6e44499e7ea4a00 is 175, key is test_row_0/A:col10/1732109113679/Put/seqid=0 2024-11-20T13:25:15,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742172_1348 (size=39549) 2024-11-20T13:25:15,992 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=52, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/545a742a6d8c4d28a6e44499e7ea4a00 2024-11-20T13:25:15,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:15,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109175979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,007 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109175994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109175994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109175997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,042 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/abdb996d3bb44460824e57cb663bec42 is 50, key is test_row_0/B:col10/1732109113679/Put/seqid=0 2024-11-20T13:25:16,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742173_1349 (size=12001) 2024-11-20T13:25:16,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109176096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109176111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,124 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109176112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109176116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,318 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109176312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109176325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,334 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109176328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109176336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,502 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/abdb996d3bb44460824e57cb663bec42 2024-11-20T13:25:16,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/6bbfa61504da43d898a53fdcf77683be is 50, key is test_row_0/C:col10/1732109113679/Put/seqid=0 2024-11-20T13:25:16,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T13:25:16,552 INFO [Thread-1545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-11-20T13:25:16,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742174_1350 (size=12001) 2024-11-20T13:25:16,560 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:25:16,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-20T13:25:16,562 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/6bbfa61504da43d898a53fdcf77683be 2024-11-20T13:25:16,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 
2024-11-20T13:25:16,562 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:25:16,563 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:25:16,563 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:25:16,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/545a742a6d8c4d28a6e44499e7ea4a00 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/545a742a6d8c4d28a6e44499e7ea4a00 2024-11-20T13:25:16,574 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/545a742a6d8c4d28a6e44499e7ea4a00, entries=200, sequenceid=52, filesize=38.6 K 2024-11-20T13:25:16,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/abdb996d3bb44460824e57cb663bec42 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/abdb996d3bb44460824e57cb663bec42 2024-11-20T13:25:16,581 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/abdb996d3bb44460824e57cb663bec42, entries=150, sequenceid=52, filesize=11.7 K 2024-11-20T13:25:16,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/6bbfa61504da43d898a53fdcf77683be as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/6bbfa61504da43d898a53fdcf77683be 2024-11-20T13:25:16,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/6bbfa61504da43d898a53fdcf77683be, entries=150, sequenceid=52, filesize=11.7 K 2024-11-20T13:25:16,590 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 33af58af6776fca1f72685bf60c347d3 in 720ms, sequenceid=52, compaction requested=true 2024-11-20T13:25:16,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:16,590 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:16,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:25:16,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:16,591 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:16,592 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:16,592 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/A is initiating minor compaction (all files) 2024-11-20T13:25:16,592 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/A in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:16,592 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/cef4fb1cbc354a40a074f3530601af57, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/75b649cd5a4a4fa7b9df0cf0abf9a8fd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/545a742a6d8c4d28a6e44499e7ea4a00] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=99.1 K 2024-11-20T13:25:16,592 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:16,592 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/cef4fb1cbc354a40a074f3530601af57, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/75b649cd5a4a4fa7b9df0cf0abf9a8fd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/545a742a6d8c4d28a6e44499e7ea4a00] 2024-11-20T13:25:16,593 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:16,593 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/B is initiating minor compaction (all files) 2024-11-20T13:25:16,593 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/B in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:16,593 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/0ce0a57b1a0c421cbb350c99287fa3c7, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/64291bd35cd94996828c2a538a45a778, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/abdb996d3bb44460824e57cb663bec42] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=35.2 K 2024-11-20T13:25:16,593 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting cef4fb1cbc354a40a074f3530601af57, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732109112475 2024-11-20T13:25:16,594 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ce0a57b1a0c421cbb350c99287fa3c7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732109112475 2024-11-20T13:25:16,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:25:16,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:16,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:25:16,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:16,594 DEBUG 
[RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75b649cd5a4a4fa7b9df0cf0abf9a8fd, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732109112507 2024-11-20T13:25:16,594 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 64291bd35cd94996828c2a538a45a778, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732109112507 2024-11-20T13:25:16,594 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 545a742a6d8c4d28a6e44499e7ea4a00, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732109113674 2024-11-20T13:25:16,595 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting abdb996d3bb44460824e57cb663bec42, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732109113674 2024-11-20T13:25:16,622 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#B#compaction#300 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:16,623 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/a60fef2f99ab459386d1013ad22236d4 is 50, key is test_row_0/B:col10/1732109113679/Put/seqid=0 2024-11-20T13:25:16,625 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:16,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T13:25:16,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:16,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:16,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:16,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:16,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:16,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:16,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:16,636 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112023a1186e7a324b31bf1eebf3940ad749_33af58af6776fca1f72685bf60c347d3 store=[table=TestAcidGuarantees family=A 
region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:16,638 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112023a1186e7a324b31bf1eebf3940ad749_33af58af6776fca1f72685bf60c347d3, store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:16,638 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112023a1186e7a324b31bf1eebf3940ad749_33af58af6776fca1f72685bf60c347d3 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:16,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742175_1351 (size=12104) 2024-11-20T13:25:16,654 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/a60fef2f99ab459386d1013ad22236d4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/a60fef2f99ab459386d1013ad22236d4 2024-11-20T13:25:16,660 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/B of 33af58af6776fca1f72685bf60c347d3 into a60fef2f99ab459386d1013ad22236d4(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:25:16,660 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:16,660 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/B, priority=13, startTime=1732109116591; duration=0sec 2024-11-20T13:25:16,660 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:16,660 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:B 2024-11-20T13:25:16,661 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:16,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T13:25:16,664 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205e8673d611c1460c84ae140dd827cfe5_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109115990/Put/seqid=0 2024-11-20T13:25:16,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109176649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109176660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,673 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:16,674 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/C is initiating minor compaction (all files) 2024-11-20T13:25:16,674 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/C in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:16,674 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/f43f5c77728b449bb95c167023781582, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/0f514d7af1e647d0aa79f610d95aaa50, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/6bbfa61504da43d898a53fdcf77683be] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=35.2 K 2024-11-20T13:25:16,674 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting f43f5c77728b449bb95c167023781582, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732109112475 2024-11-20T13:25:16,675 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f514d7af1e647d0aa79f610d95aaa50, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732109112507 2024-11-20T13:25:16,675 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 6bbfa61504da43d898a53fdcf77683be, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732109113674 2024-11-20T13:25:16,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109176667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109176671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742176_1352 (size=4469) 2024-11-20T13:25:16,688 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#A#compaction#301 average throughput is 0.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:16,689 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/5a1089b3763f4338a6dfa891f3558db6 is 175, key is test_row_0/A:col10/1732109113679/Put/seqid=0 2024-11-20T13:25:16,698 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#C#compaction#303 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:16,699 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/df0b3f1ed9214d50911c7d20019c9ab9 is 50, key is test_row_0/C:col10/1732109113679/Put/seqid=0 2024-11-20T13:25:16,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742177_1353 (size=14594) 2024-11-20T13:25:16,710 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:16,715 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205e8673d611c1460c84ae140dd827cfe5_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205e8673d611c1460c84ae140dd827cfe5_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:16,716 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/dc1eed49c7f54cbb83a9e15af9d79bb0, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:16,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/dc1eed49c7f54cbb83a9e15af9d79bb0 is 175, key is test_row_0/A:col10/1732109115990/Put/seqid=0 2024-11-20T13:25:16,720 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,721 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T13:25:16,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:16,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:16,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:16,728 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:16,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:16,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:16,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742179_1355 (size=12104) 2024-11-20T13:25:16,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742178_1354 (size=31058) 2024-11-20T13:25:16,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742180_1356 (size=39549) 2024-11-20T13:25:16,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53046 deadline: 1732109176768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109176773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109176780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,781 DEBUG [Thread-1539 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4294 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:25:16,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109176781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T13:25:16,881 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,882 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T13:25:16,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:16,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:16,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:16,882 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:16,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:16,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:16,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109176982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109176983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:16,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:16,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109176986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:17,043 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:17,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T13:25:17,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:17,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:17,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:17,044 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:17,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:17,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:17,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T13:25:17,193 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/dc1eed49c7f54cbb83a9e15af9d79bb0 2024-11-20T13:25:17,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:17,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109177194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:17,196 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:17,197 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T13:25:17,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:17,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:17,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:17,197 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:17,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:17,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:17,215 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/5a1089b3763f4338a6dfa891f3558db6 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/5a1089b3763f4338a6dfa891f3558db6 2024-11-20T13:25:17,216 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/df0b3f1ed9214d50911c7d20019c9ab9 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/df0b3f1ed9214d50911c7d20019c9ab9 2024-11-20T13:25:17,228 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/C of 33af58af6776fca1f72685bf60c347d3 into df0b3f1ed9214d50911c7d20019c9ab9(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:25:17,229 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:17,229 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/C, priority=13, startTime=1732109116594; duration=0sec 2024-11-20T13:25:17,229 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/A of 33af58af6776fca1f72685bf60c347d3 into 5a1089b3763f4338a6dfa891f3558db6(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:25:17,229 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:17,229 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/A, priority=13, startTime=1732109116590; duration=0sec 2024-11-20T13:25:17,229 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:17,229 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:C 2024-11-20T13:25:17,229 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:17,229 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:A 2024-11-20T13:25:17,232 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/e77e02257ad8447193c578a71030eff3 is 50, key is test_row_0/B:col10/1732109115990/Put/seqid=0 2024-11-20T13:25:17,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742181_1357 (size=12001) 2024-11-20T13:25:17,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:17,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109177290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:17,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:17,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109177292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:17,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:17,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109177295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:17,356 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:17,356 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T13:25:17,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:17,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:17,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:17,357 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:17,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:17,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:17,512 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:17,512 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T13:25:17,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:17,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:17,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:17,513 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:17,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:17,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:17,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/e77e02257ad8447193c578a71030eff3 2024-11-20T13:25:17,669 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:17,669 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T13:25:17,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:17,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:17,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:17,670 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:17,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:17,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:17,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T13:25:17,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/ae9689c14a764f1680e96f4d50fed7fc is 50, key is test_row_0/C:col10/1732109115990/Put/seqid=0 2024-11-20T13:25:17,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742182_1358 (size=12001) 2024-11-20T13:25:17,745 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/ae9689c14a764f1680e96f4d50fed7fc 2024-11-20T13:25:17,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/dc1eed49c7f54cbb83a9e15af9d79bb0 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/dc1eed49c7f54cbb83a9e15af9d79bb0 2024-11-20T13:25:17,762 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/dc1eed49c7f54cbb83a9e15af9d79bb0, entries=200, sequenceid=78, filesize=38.6 K 2024-11-20T13:25:17,763 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/e77e02257ad8447193c578a71030eff3 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/e77e02257ad8447193c578a71030eff3 2024-11-20T13:25:17,777 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/e77e02257ad8447193c578a71030eff3, entries=150, sequenceid=78, filesize=11.7 K 2024-11-20T13:25:17,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/ae9689c14a764f1680e96f4d50fed7fc as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/ae9689c14a764f1680e96f4d50fed7fc 2024-11-20T13:25:17,784 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/ae9689c14a764f1680e96f4d50fed7fc, entries=150, sequenceid=78, filesize=11.7 K 2024-11-20T13:25:17,796 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): 
Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 33af58af6776fca1f72685bf60c347d3 in 1164ms, sequenceid=78, compaction requested=false 2024-11-20T13:25:17,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:17,814 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T13:25:17,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:17,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:17,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:17,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:17,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:17,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:17,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:17,827 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:17,828 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T13:25:17,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:17,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:17,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:17,832 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:17,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:17,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:17,843 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112044d774642d34443e8279edad4b5f0ca2_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109117811/Put/seqid=0 2024-11-20T13:25:17,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742183_1359 (size=17034) 2024-11-20T13:25:17,904 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:17,910 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112044d774642d34443e8279edad4b5f0ca2_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112044d774642d34443e8279edad4b5f0ca2_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:17,911 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/dac8e1a0eacc410297a3cd787cad3542, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:17,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/dac8e1a0eacc410297a3cd787cad3542 is 175, key is test_row_0/A:col10/1732109117811/Put/seqid=0 2024-11-20T13:25:17,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742184_1360 (size=48139) 2024-11-20T13:25:17,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:17,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109177957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:17,968 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=95, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/dac8e1a0eacc410297a3cd787cad3542 2024-11-20T13:25:17,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:17,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109177966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:17,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:17,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109177971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:17,992 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:17,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T13:25:17,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
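Note on the repeated RegionTooBusyException warnings above: writes are being rejected because the region's memstore has grown past its blocking limit, reported here as 512.0 K. That limit is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, so the small value suggests the test runs with a deliberately reduced flush size. A minimal sketch of how such a configuration could be set programmatically; the exact values used by this test are not shown in the log and the 128 KB figure below is an assumption derived from the 512 K limit and the default multiplier of 4:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
    public static Configuration smallMemstoreConf() {
        Configuration conf = HBaseConfiguration.create();
        // Flush a memstore once it reaches 128 KB (the default is 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block new writes once the memstore reaches 4x the flush size,
        // i.e. 512 KB -- the "Over memstore limit=512.0 K" seen in the warnings.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}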
2024-11-20T13:25:17,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:17,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:17,993 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:17,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:17,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
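The pid=107 cycle above repeats because the master keeps re-dispatching the flush procedure while the region is still busy with the flush started by MemStoreFlusher.0: the region server answers "NOT flushing ... as already flushing", wraps that in an IOException, and the master records the failure and schedules another attempt. A client that triggers flushes explicitly can hit the same contention. A hedged sketch of retrying an administrative flush with backoff; the method names are the public Admin API, but the retry loop and timings are illustrative, not the procedure framework's own retry logic:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushWithRetry {
    public static void flushWithRetry(TableName table, int maxAttempts) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            long backoffMs = 100;
            for (int attempt = 1; attempt <= maxAttempts; attempt++) {
                try {
                    admin.flush(table);          // may fail while another flush is in progress
                    return;
                } catch (IOException e) {
                    if (attempt == maxAttempts) throw e;
                    Thread.sleep(backoffMs);     // back off before retrying
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
        }
    }
}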
2024-11-20T13:25:18,004 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/31d6120b5fed4785b32fb70f00668927 is 50, key is test_row_0/B:col10/1732109117811/Put/seqid=0 2024-11-20T13:25:18,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742185_1361 (size=12001) 2024-11-20T13:25:18,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/31d6120b5fed4785b32fb70f00668927 2024-11-20T13:25:18,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:18,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109178068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:18,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109178080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,096 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/c83c4da5db03466f8b39505f44dbd3d2 is 50, key is test_row_0/C:col10/1732109117811/Put/seqid=0 2024-11-20T13:25:18,109 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:18,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109178097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742186_1362 (size=12001) 2024-11-20T13:25:18,147 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/c83c4da5db03466f8b39505f44dbd3d2 2024-11-20T13:25:18,149 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,150 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T13:25:18,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:18,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:18,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:18,150 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:18,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:18,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:18,168 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/dac8e1a0eacc410297a3cd787cad3542 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/dac8e1a0eacc410297a3cd787cad3542 2024-11-20T13:25:18,177 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/dac8e1a0eacc410297a3cd787cad3542, entries=250, sequenceid=95, filesize=47.0 K 2024-11-20T13:25:18,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/31d6120b5fed4785b32fb70f00668927 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/31d6120b5fed4785b32fb70f00668927 2024-11-20T13:25:18,188 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/31d6120b5fed4785b32fb70f00668927, entries=150, sequenceid=95, filesize=11.7 K 2024-11-20T13:25:18,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/c83c4da5db03466f8b39505f44dbd3d2 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/c83c4da5db03466f8b39505f44dbd3d2 2024-11-20T13:25:18,196 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/c83c4da5db03466f8b39505f44dbd3d2, entries=150, sequenceid=95, filesize=11.7 K 2024-11-20T13:25:18,197 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 33af58af6776fca1f72685bf60c347d3 in 383ms, sequenceid=95, compaction requested=true 2024-11-20T13:25:18,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:18,197 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:25:18,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:18,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:25:18,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T13:25:18,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:25:18,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T13:25:18,198 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:18,200 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:18,200 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 118746 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:18,200 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/A is initiating minor compaction (all files) 2024-11-20T13:25:18,200 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/A in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
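The CompactSplit entries above show the completed flush pushing each store (A, B, C) over its compaction trigger: with three eligible store files, the ExploringCompactionPolicy selects all three for a minor compaction, which is then run on the short- and long-compaction thread pools. The thresholds involved are ordinary configuration. A minimal sketch of the knobs that govern when and how many files are picked; the values shown are the usual defaults (the "16 blocking" in the SortedCompactionPolicy lines matches the default blocking-store-files setting), not necessarily what this test used:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
    public static Configuration compactionConf() {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of store files before a minor compaction is considered.
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on how many files a single minor compaction may include.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Writes are blocked once a store accumulates this many files.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        return conf;
    }
}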
2024-11-20T13:25:18,200 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/5a1089b3763f4338a6dfa891f3558db6, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/dc1eed49c7f54cbb83a9e15af9d79bb0, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/dac8e1a0eacc410297a3cd787cad3542] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=116.0 K 2024-11-20T13:25:18,200 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:18,201 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/5a1089b3763f4338a6dfa891f3558db6, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/dc1eed49c7f54cbb83a9e15af9d79bb0, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/dac8e1a0eacc410297a3cd787cad3542] 2024-11-20T13:25:18,201 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a1089b3763f4338a6dfa891f3558db6, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732109113674 2024-11-20T13:25:18,202 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc1eed49c7f54cbb83a9e15af9d79bb0, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732109115968 2024-11-20T13:25:18,202 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting dac8e1a0eacc410297a3cd787cad3542, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732109116643 2024-11-20T13:25:18,205 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:18,205 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/C is initiating minor compaction (all files) 2024-11-20T13:25:18,205 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/C in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
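The DefaultMobStoreCompactor and HMobStore entries indicate that family A is MOB-enabled: oversized cells are written to separate MOB files under mobdir/ and the regular store file keeps only references, which is why the A flush is first renamed into the mob directory before the store file is committed. MOB is a per-column-family schema setting. A hedged sketch of how such a family could be declared; the threshold value below is illustrative, since the test's actual table schema is not part of this log:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
    public static TableDescriptor mobTable() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)     // store large cells in MOB files under mobdir/
                .setMobThreshold(100L)   // cells larger than this many bytes go to MOB
                .build())
            .build();
    }
}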
2024-11-20T13:25:18,205 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/df0b3f1ed9214d50911c7d20019c9ab9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/ae9689c14a764f1680e96f4d50fed7fc, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/c83c4da5db03466f8b39505f44dbd3d2] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=35.3 K 2024-11-20T13:25:18,206 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting df0b3f1ed9214d50911c7d20019c9ab9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732109113674 2024-11-20T13:25:18,206 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting ae9689c14a764f1680e96f4d50fed7fc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732109115989 2024-11-20T13:25:18,207 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting c83c4da5db03466f8b39505f44dbd3d2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732109116643 2024-11-20T13:25:18,222 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:18,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:18,233 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T13:25:18,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:18,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:18,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:18,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:18,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:18,234 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#C#compaction#310 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:18,235 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/1b07d521b74346c787769d929a487f19 is 50, key is test_row_0/C:col10/1732109117811/Put/seqid=0 2024-11-20T13:25:18,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:18,246 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120046d844d16a443bb9192113ea6057e9c_33af58af6776fca1f72685bf60c347d3 store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:18,249 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120046d844d16a443bb9192113ea6057e9c_33af58af6776fca1f72685bf60c347d3, store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:18,249 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120046d844d16a443bb9192113ea6057e9c_33af58af6776fca1f72685bf60c347d3 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:18,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742187_1363 (size=12207) 2024-11-20T13:25:18,307 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,308 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T13:25:18,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:18,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:18,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:18,312 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:18,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:18,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:18,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:18,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109178317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,334 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:18,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109178317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,335 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120594eba400df148cb950f74393aecfc66_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109117963/Put/seqid=0 2024-11-20T13:25:18,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:18,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109178329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:18,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109178333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742188_1364 (size=4469) 2024-11-20T13:25:18,365 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#A#compaction#309 average throughput is 0.17 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:18,365 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/d6ca9eebed374760b2129da263831267 is 175, key is test_row_0/A:col10/1732109117811/Put/seqid=0 2024-11-20T13:25:18,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742190_1366 (size=31161) 2024-11-20T13:25:18,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742189_1365 (size=14594) 2024-11-20T13:25:18,425 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/d6ca9eebed374760b2129da263831267 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/d6ca9eebed374760b2129da263831267 2024-11-20T13:25:18,432 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:18,435 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/A of 33af58af6776fca1f72685bf60c347d3 into d6ca9eebed374760b2129da263831267(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
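The RegionTooBusyException warnings that bracket the flush and compaction work are the server shedding load: mutations are rejected outright until the memstore drains back below the blocking limit, and each rejection is echoed by CallRunner together with the call's deadline. The usual client response is to retry with backoff, which the standard HBase client normally does internally (the caller may then see the exception wrapped in a retries-exhausted error rather than directly). A minimal hand-rolled sketch of the same pattern; the row, family, and value are illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
    public static void putWithBackoff() throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 50;
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    table.put(put);
                    return;                      // mutation accepted
                } catch (RegionTooBusyException busy) {
                    Thread.sleep(backoffMs);     // region over memstore limit; wait for the flush
                    backoffMs = Math.min(backoffMs * 2, 2_000);
                }
            }
            throw new RegionTooBusyException("region still busy after retries");
        }
    }
}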
2024-11-20T13:25:18,435 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:18,435 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/A, priority=13, startTime=1732109118197; duration=0sec 2024-11-20T13:25:18,435 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:18,435 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:A 2024-11-20T13:25:18,435 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:18,447 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:18,447 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/B is initiating minor compaction (all files) 2024-11-20T13:25:18,447 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/B in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:18,447 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/a60fef2f99ab459386d1013ad22236d4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/e77e02257ad8447193c578a71030eff3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/31d6120b5fed4785b32fb70f00668927] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=35.3 K 2024-11-20T13:25:18,448 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting a60fef2f99ab459386d1013ad22236d4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732109113674 2024-11-20T13:25:18,449 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting e77e02257ad8447193c578a71030eff3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732109115989 2024-11-20T13:25:18,449 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 31d6120b5fed4785b32fb70f00668927, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732109116643 2024-11-20T13:25:18,450 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120594eba400df148cb950f74393aecfc66_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120594eba400df148cb950f74393aecfc66_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:18,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:18,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109178436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:18,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109178451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,461 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/eb6451f2571849adb224bee5fcbd9c07, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:18,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/eb6451f2571849adb224bee5fcbd9c07 is 175, key is test_row_0/A:col10/1732109117963/Put/seqid=0 2024-11-20T13:25:18,474 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#B#compaction#312 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:18,475 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/1607178b1c8840ad99c0b5292e779f98 is 50, key is test_row_0/B:col10/1732109117811/Put/seqid=0 2024-11-20T13:25:18,475 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:18,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109178453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,481 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T13:25:18,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:18,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:18,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:18,482 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:18,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:18,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:18,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742191_1367 (size=39549) 2024-11-20T13:25:18,505 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/eb6451f2571849adb224bee5fcbd9c07 2024-11-20T13:25:18,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742192_1368 (size=12207) 2024-11-20T13:25:18,533 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/922556f11c864a0997483d492b854423 is 50, key is test_row_0/B:col10/1732109117963/Put/seqid=0 2024-11-20T13:25:18,547 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/1607178b1c8840ad99c0b5292e779f98 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/1607178b1c8840ad99c0b5292e779f98 2024-11-20T13:25:18,553 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/B of 33af58af6776fca1f72685bf60c347d3 into 1607178b1c8840ad99c0b5292e779f98(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:25:18,553 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:18,553 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/B, priority=13, startTime=1732109118197; duration=0sec 2024-11-20T13:25:18,553 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:18,553 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:B 2024-11-20T13:25:18,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742193_1369 (size=12001) 2024-11-20T13:25:18,564 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/922556f11c864a0997483d492b854423 2024-11-20T13:25:18,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/0aeae30597f14641bf6434439f2b4a12 is 50, key is test_row_0/C:col10/1732109117963/Put/seqid=0 2024-11-20T13:25:18,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742194_1370 (size=12001) 2024-11-20T13:25:18,635 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,635 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T13:25:18,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:18,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:18,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:18,636 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:18,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:18,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:18,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:18,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109178637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:18,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109178654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:18,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109178657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:18,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109178684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T13:25:18,700 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/1b07d521b74346c787769d929a487f19 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/1b07d521b74346c787769d929a487f19 2024-11-20T13:25:18,707 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/C of 33af58af6776fca1f72685bf60c347d3 into 1b07d521b74346c787769d929a487f19(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:25:18,707 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:18,707 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/C, priority=13, startTime=1732109118197; duration=0sec 2024-11-20T13:25:18,707 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:18,707 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:C 2024-11-20T13:25:18,788 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,789 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T13:25:18,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:18,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:18,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:18,789 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:18,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:18,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:18,942 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,942 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T13:25:18,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:18,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
as already flushing 2024-11-20T13:25:18,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:18,944 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:18,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:18,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:18,961 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:18,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109178957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:18,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:18,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109178960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:19,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:19,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109178998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:19,033 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/0aeae30597f14641bf6434439f2b4a12 2024-11-20T13:25:19,046 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/eb6451f2571849adb224bee5fcbd9c07 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/eb6451f2571849adb224bee5fcbd9c07 2024-11-20T13:25:19,054 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/eb6451f2571849adb224bee5fcbd9c07, entries=200, sequenceid=119, filesize=38.6 K 2024-11-20T13:25:19,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/922556f11c864a0997483d492b854423 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/922556f11c864a0997483d492b854423 2024-11-20T13:25:19,059 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/922556f11c864a0997483d492b854423, entries=150, sequenceid=119, filesize=11.7 K 2024-11-20T13:25:19,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/0aeae30597f14641bf6434439f2b4a12 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/0aeae30597f14641bf6434439f2b4a12 2024-11-20T13:25:19,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,067 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/0aeae30597f14641bf6434439f2b4a12, entries=150, sequenceid=119, filesize=11.7 K 2024-11-20T13:25:19,068 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 33af58af6776fca1f72685bf60c347d3 in 835ms, sequenceid=119, compaction requested=false 2024-11-20T13:25:19,068 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:19,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,098 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:19,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,098 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T13:25:19,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:19,099 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T13:25:19,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:19,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:19,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:19,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:19,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:19,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:19,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208fbbda440d244bb5b06ba739337f1b5a_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_1/A:col10/1732109118330/Put/seqid=0 
2024-11-20T13:25:19,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:25:19,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:25:19,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:25:19,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:25:19,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742195_1371 (size=9814) 2024-11-20T13:25:19,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,209 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,216 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208fbbda440d244bb5b06ba739337f1b5a_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208fbbda440d244bb5b06ba739337f1b5a_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:19,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/a37bdc631ca64444a20d24aa01043d9a, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:19,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/a37bdc631ca64444a20d24aa01043d9a is 175, key is test_row_1/A:col10/1732109118330/Put/seqid=0 2024-11-20T13:25:19,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:25:19,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:19,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:19,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742196_1372 
(size=22461) 2024-11-20T13:25:19,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,240 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=134, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/a37bdc631ca64444a20d24aa01043d9a 2024-11-20T13:25:19,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,242 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,248 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,254 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/730cc30a65894f9d991509ccdade2c58 is 50, key is test_row_1/B:col10/1732109118330/Put/seqid=0 2024-11-20T13:25:19,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742197_1373 (size=9757) 2024-11-20T13:25:19,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:19,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109179483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:19,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:19,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109179484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:19,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:19,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109179488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:19,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:19,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109179504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:19,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:19,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109179597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:19,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:19,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109179598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:19,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:19,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109179612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:19,680 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/730cc30a65894f9d991509ccdade2c58 2024-11-20T13:25:19,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/8c99d895dac941e4b1662be30f93f075 is 50, key is test_row_1/C:col10/1732109118330/Put/seqid=0 2024-11-20T13:25:19,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742198_1374 (size=9757) 2024-11-20T13:25:19,744 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/8c99d895dac941e4b1662be30f93f075 2024-11-20T13:25:19,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/a37bdc631ca64444a20d24aa01043d9a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/a37bdc631ca64444a20d24aa01043d9a 2024-11-20T13:25:19,764 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/a37bdc631ca64444a20d24aa01043d9a, entries=100, sequenceid=134, filesize=21.9 K 2024-11-20T13:25:19,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/730cc30a65894f9d991509ccdade2c58 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/730cc30a65894f9d991509ccdade2c58 2024-11-20T13:25:19,770 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/730cc30a65894f9d991509ccdade2c58, entries=100, sequenceid=134, filesize=9.5 K 2024-11-20T13:25:19,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/8c99d895dac941e4b1662be30f93f075 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/8c99d895dac941e4b1662be30f93f075 2024-11-20T13:25:19,778 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/8c99d895dac941e4b1662be30f93f075, entries=100, sequenceid=134, filesize=9.5 K 2024-11-20T13:25:19,781 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for 33af58af6776fca1f72685bf60c347d3 in 683ms, sequenceid=134, compaction requested=true 2024-11-20T13:25:19,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:19,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:19,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-20T13:25:19,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-20T13:25:19,800 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-20T13:25:19,800 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.2340 sec 2024-11-20T13:25:19,802 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 3.2410 sec 2024-11-20T13:25:19,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:19,816 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T13:25:19,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:19,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:19,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:19,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:19,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:19,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:19,848 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dc5be2fdb9a24ffd8b4a9fc64910522e_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109119474/Put/seqid=0 2024-11-20T13:25:19,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:19,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109179847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:19,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:19,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109179851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:19,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:19,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109179852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:19,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742199_1375 (size=12304) 2024-11-20T13:25:19,893 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:19,908 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dc5be2fdb9a24ffd8b4a9fc64910522e_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dc5be2fdb9a24ffd8b4a9fc64910522e_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:19,913 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/5269cc0374154390a519f1c0375da62d, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:19,913 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/5269cc0374154390a519f1c0375da62d is 175, key is test_row_0/A:col10/1732109119474/Put/seqid=0 2024-11-20T13:25:19,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742200_1376 (size=31105) 2024-11-20T13:25:19,952 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=162, memsize=55.9 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/5269cc0374154390a519f1c0375da62d 2024-11-20T13:25:19,970 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/760b73e37b774dc4b7a4fd2fc827bdc6 is 50, key is test_row_0/B:col10/1732109119474/Put/seqid=0 2024-11-20T13:25:19,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:19,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109179969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:19,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:19,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109179970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:19,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:19,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109179971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:20,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742201_1377 (size=12151) 2024-11-20T13:25:20,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:20,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109180177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:20,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:20,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109180178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:20,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:20,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109180181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:20,429 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=162 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/760b73e37b774dc4b7a4fd2fc827bdc6 2024-11-20T13:25:20,438 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/22ce7e2efad74f549617d2dcc63e6e20 is 50, key is test_row_0/C:col10/1732109119474/Put/seqid=0 2024-11-20T13:25:20,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742202_1378 (size=12151) 2024-11-20T13:25:20,448 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=162 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/22ce7e2efad74f549617d2dcc63e6e20 2024-11-20T13:25:20,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/5269cc0374154390a519f1c0375da62d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/5269cc0374154390a519f1c0375da62d 2024-11-20T13:25:20,463 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/5269cc0374154390a519f1c0375da62d, entries=150, sequenceid=162, filesize=30.4 K 2024-11-20T13:25:20,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/760b73e37b774dc4b7a4fd2fc827bdc6 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/760b73e37b774dc4b7a4fd2fc827bdc6 2024-11-20T13:25:20,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,470 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/760b73e37b774dc4b7a4fd2fc827bdc6, entries=150, sequenceid=162, filesize=11.9 K 2024-11-20T13:25:20,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/22ce7e2efad74f549617d2dcc63e6e20 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/22ce7e2efad74f549617d2dcc63e6e20 2024-11-20T13:25:20,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:25:20,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:25:20,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/22ce7e2efad74f549617d2dcc63e6e20, entries=150, sequenceid=162, filesize=11.9 K 2024-11-20T13:25:20,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,476 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 33af58af6776fca1f72685bf60c347d3 in 660ms, sequenceid=162, compaction requested=true 2024-11-20T13:25:20,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:20,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:25:20,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:20,477 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:25:20,477 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:25:20,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:25:20,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:20,477 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:25:20,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:20,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,478 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 124276 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:25:20,478 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46116 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:25:20,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,478 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/A is initiating minor compaction (all files) 2024-11-20T13:25:20,478 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/B is initiating minor compaction (all files) 2024-11-20T13:25:20,478 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/A in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:20,479 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/d6ca9eebed374760b2129da263831267, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/eb6451f2571849adb224bee5fcbd9c07, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/a37bdc631ca64444a20d24aa01043d9a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/5269cc0374154390a519f1c0375da62d] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=121.4 K 2024-11-20T13:25:20,479 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/B in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:20,479 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:20,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,479 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/1607178b1c8840ad99c0b5292e779f98, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/922556f11c864a0997483d492b854423, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/730cc30a65894f9d991509ccdade2c58, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/760b73e37b774dc4b7a4fd2fc827bdc6] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=45.0 K 2024-11-20T13:25:20,479 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/d6ca9eebed374760b2129da263831267, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/eb6451f2571849adb224bee5fcbd9c07, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/a37bdc631ca64444a20d24aa01043d9a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/5269cc0374154390a519f1c0375da62d] 2024-11-20T13:25:20,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,479 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting d6ca9eebed374760b2129da263831267, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732109116643 2024-11-20T13:25:20,479 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 1607178b1c8840ad99c0b5292e779f98, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732109116643 2024-11-20T13:25:20,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,480 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 922556f11c864a0997483d492b854423, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732109117962 2024-11-20T13:25:20,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,480 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 730cc30a65894f9d991509ccdade2c58, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732109118326 2024-11-20T13:25:20,480 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb6451f2571849adb224bee5fcbd9c07, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732109117945 2024-11-20T13:25:20,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,480 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting a37bdc631ca64444a20d24aa01043d9a, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732109118326 2024-11-20T13:25:20,481 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 760b73e37b774dc4b7a4fd2fc827bdc6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732109119452 2024-11-20T13:25:20,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,481 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5269cc0374154390a519f1c0375da62d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732109119452 2024-11-20T13:25:20,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,492 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#B#compaction#321 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:20,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,493 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/53d804087ccf4b5db41b32bb716177f5 is 50, key is test_row_0/B:col10/1732109119474/Put/seqid=0 2024-11-20T13:25:20,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,494 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:20,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,496 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,499 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,501 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112045b9ba4ebac94f7e9d61ba63acfb42c4_33af58af6776fca1f72685bf60c347d3 store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:20,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,504 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112045b9ba4ebac94f7e9d61ba63acfb42c4_33af58af6776fca1f72685bf60c347d3, store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:20,504 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112045b9ba4ebac94f7e9d61ba63acfb42c4_33af58af6776fca1f72685bf60c347d3 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:20,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:20,516 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T13:25:20,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742203_1379 (size=12493) 2024-11-20T13:25:20,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:20,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:20,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:20,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:20,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:20,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-20T13:25:20,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742204_1380 (size=4469) 2024-11-20T13:25:20,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,521 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#A#compaction#322 average throughput is 0.90 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:20,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,522 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/930c9c29ebb040db8965724456f00b4c is 175, key is test_row_0/A:col10/1732109119474/Put/seqid=0 2024-11-20T13:25:20,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,547 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112062d8df810b6645d8b26d07feaf293d21_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109119840/Put/seqid=0 2024-11-20T13:25:20,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,549 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742205_1381 (size=31447) 2024-11-20T13:25:20,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742206_1382 (size=17284) 2024-11-20T13:25:20,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,558 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/930c9c29ebb040db8965724456f00b4c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/930c9c29ebb040db8965724456f00b4c 2024-11-20T13:25:20,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,563 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/A of 33af58af6776fca1f72685bf60c347d3 into 930c9c29ebb040db8965724456f00b4c(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:25:20,563 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:20,563 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/A, priority=12, startTime=1732109120476; duration=0sec 2024-11-20T13:25:20,563 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:20,563 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:A 2024-11-20T13:25:20,563 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:25:20,565 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46116 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:25:20,565 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/C is initiating minor compaction (all files) 2024-11-20T13:25:20,565 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/C in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:20,565 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/1b07d521b74346c787769d929a487f19, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/0aeae30597f14641bf6434439f2b4a12, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/8c99d895dac941e4b1662be30f93f075, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/22ce7e2efad74f549617d2dcc63e6e20] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=45.0 K 2024-11-20T13:25:20,567 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b07d521b74346c787769d929a487f19, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732109116643 2024-11-20T13:25:20,567 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0aeae30597f14641bf6434439f2b4a12, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732109117962 2024-11-20T13:25:20,568 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c99d895dac941e4b1662be30f93f075, keycount=100, bloomtype=ROW, size=9.5 K, 
encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732109118326 2024-11-20T13:25:20,570 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22ce7e2efad74f549617d2dcc63e6e20, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732109119452 2024-11-20T13:25:20,579 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#C#compaction#324 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:20,580 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/1e4d4bae1f09472f86505c28b8ec7333 is 50, key is test_row_0/C:col10/1732109119474/Put/seqid=0 2024-11-20T13:25:20,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742207_1383 (size=12493) 2024-11-20T13:25:20,598 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/1e4d4bae1f09472f86505c28b8ec7333 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/1e4d4bae1f09472f86505c28b8ec7333 2024-11-20T13:25:20,603 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/C of 33af58af6776fca1f72685bf60c347d3 into 1e4d4bae1f09472f86505c28b8ec7333(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:25:20,603 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:20,603 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/C, priority=12, startTime=1732109120477; duration=0sec 2024-11-20T13:25:20,603 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:20,603 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:C 2024-11-20T13:25:20,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:20,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109180610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:20,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:20,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109180611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:20,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:20,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109180611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:20,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:20,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109180620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:20,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T13:25:20,698 INFO [Thread-1545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-20T13:25:20,700 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:25:20,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-20T13:25:20,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T13:25:20,704 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:25:20,705 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:25:20,705 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:25:20,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:20,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109180721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:20,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:20,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109180721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:20,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:20,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109180721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:20,730 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:20,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109180728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:20,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:20,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53046 deadline: 1732109180796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:20,804 DEBUG [Thread-1539 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8318 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:25:20,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T13:25:20,857 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:20,858 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-20T13:25:20,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:20,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:20,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:20,858 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:20,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:20,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:20,930 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/53d804087ccf4b5db41b32bb716177f5 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/53d804087ccf4b5db41b32bb716177f5 2024-11-20T13:25:20,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:20,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109180925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:20,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:20,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109180926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:20,932 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:20,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109180926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:20,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:20,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109180931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:20,941 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/B of 33af58af6776fca1f72685bf60c347d3 into 53d804087ccf4b5db41b32bb716177f5(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:25:20,941 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:20,941 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/B, priority=12, startTime=1732109120477; duration=0sec 2024-11-20T13:25:20,941 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:20,941 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:B 2024-11-20T13:25:20,956 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:20,961 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112062d8df810b6645d8b26d07feaf293d21_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112062d8df810b6645d8b26d07feaf293d21_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:20,962 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/35eaab8f05124f479ce573084a0bf208, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:20,962 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/35eaab8f05124f479ce573084a0bf208 is 175, key is test_row_0/A:col10/1732109119840/Put/seqid=0 2024-11-20T13:25:21,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T13:25:21,016 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:21,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742208_1384 (size=48389) 2024-11-20T13:25:21,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-20T13:25:21,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:21,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:21,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:21,017 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:21,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:21,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:21,168 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:21,169 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-20T13:25:21,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:21,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:21,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:21,169 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:21,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:21,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:21,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:21,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109181233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:21,238 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:21,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109181233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:21,238 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:25:21,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109181233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:21,245 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:25:21,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109181240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:21,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108
2024-11-20T13:25:21,321 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:21,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109
2024-11-20T13:25:21,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.
2024-11-20T13:25:21,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing
2024-11-20T13:25:21,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.
2024-11-20T13:25:21,324 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109
java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:21,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109
java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:21,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=109
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:21,416 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=174, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/35eaab8f05124f479ce573084a0bf208
2024-11-20T13:25:21,424 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/43296bc67c944c97aa2d0d97e580985b is 50, key is test_row_0/B:col10/1732109119840/Put/seqid=0
2024-11-20T13:25:21,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742209_1385 (size=12151)
2024-11-20T13:25:21,479 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:21,481 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109
2024-11-20T13:25:21,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.
2024-11-20T13:25:21,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing
2024-11-20T13:25:21,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.
2024-11-20T13:25:21,481 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109
java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:21,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109
java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:21,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=109
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:21,635 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:21,636 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109
2024-11-20T13:25:21,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.
2024-11-20T13:25:21,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing
2024-11-20T13:25:21,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.
2024-11-20T13:25:21,637 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109
java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:21,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109
java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:21,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=109
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:21,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:25:21,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109181739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:21,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:25:21,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109181740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:21,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:25:21,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109181741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:21,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:25:21,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109181751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:21,789 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:21,790 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109
2024-11-20T13:25:21,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.
2024-11-20T13:25:21,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing
2024-11-20T13:25:21,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.
2024-11-20T13:25:21,790 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109
java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:21,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109
java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:21,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=109
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:21,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108
2024-11-20T13:25:21,849 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/43296bc67c944c97aa2d0d97e580985b
2024-11-20T13:25:21,866 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/d1feda80ac9f41159661169832d00a3e is 50, key is test_row_0/C:col10/1732109119840/Put/seqid=0
2024-11-20T13:25:21,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742210_1386 (size=12151)
2024-11-20T13:25:21,890 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/d1feda80ac9f41159661169832d00a3e
2024-11-20T13:25:21,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/35eaab8f05124f479ce573084a0bf208 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/35eaab8f05124f479ce573084a0bf208
2024-11-20T13:25:21,909 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/35eaab8f05124f479ce573084a0bf208, entries=250, sequenceid=174, filesize=47.3 K
2024-11-20T13:25:21,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/43296bc67c944c97aa2d0d97e580985b as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/43296bc67c944c97aa2d0d97e580985b
2024-11-20T13:25:21,918 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/43296bc67c944c97aa2d0d97e580985b, entries=150, sequenceid=174, filesize=11.9 K
2024-11-20T13:25:21,919 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/d1feda80ac9f41159661169832d00a3e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/d1feda80ac9f41159661169832d00a3e
2024-11-20T13:25:21,924 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/d1feda80ac9f41159661169832d00a3e, entries=150, sequenceid=174, filesize=11.9 K
2024-11-20T13:25:21,926 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 33af58af6776fca1f72685bf60c347d3 in 1410ms, sequenceid=174, compaction requested=false
2024-11-20T13:25:21,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33af58af6776fca1f72685bf60c347d3:
2024-11-20T13:25:21,943 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:21,944 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109
2024-11-20T13:25:21,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.
2024-11-20T13:25:21,944 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB
2024-11-20T13:25:21,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A
2024-11-20T13:25:21,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T13:25:21,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B
2024-11-20T13:25:21,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T13:25:21,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C
2024-11-20T13:25:21,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T13:25:21,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205e6eec3832dd4a7b9a5725d0a9fe9d8b_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109120619/Put/seqid=0
2024-11-20T13:25:21,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742211_1387 (size=12304)
2024-11-20T13:25:22,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,379 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205e6eec3832dd4a7b9a5725d0a9fe9d8b_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205e6eec3832dd4a7b9a5725d0a9fe9d8b_33af58af6776fca1f72685bf60c347d3
2024-11-20T13:25:22,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/91cd5c868a6c4fae869ed0943e73702d, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3]
2024-11-20T13:25:22,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/91cd5c868a6c4fae869ed0943e73702d is 175, key is test_row_0/A:col10/1732109120619/Put/seqid=0
2024-11-20T13:25:22,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742212_1388 (size=31105)
2024-11-20T13:25:22,385 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=201, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/91cd5c868a6c4fae869ed0943e73702d
2024-11-20T13:25:22,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/21def46d941b47b292545e797c350bd4 is 50, key is test_row_0/B:col10/1732109120619/Put/seqid=0
2024-11-20T13:25:22,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742213_1389 (size=12151)
2024-11-20T13:25:22,402 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/21def46d941b47b292545e797c350bd4
2024-11-20T13:25:22,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/290cca15535a4a73bf900c57dd9c0cda is 50, key is test_row_0/C:col10/1732109120619/Put/seqid=0
2024-11-20T13:25:22,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742214_1390 (size=12151)
2024-11-20T13:25:22,456 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/290cca15535a4a73bf900c57dd9c0cda
2024-11-20T13:25:22,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/91cd5c868a6c4fae869ed0943e73702d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/91cd5c868a6c4fae869ed0943e73702d
2024-11-20T13:25:22,468 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/91cd5c868a6c4fae869ed0943e73702d, entries=150, sequenceid=201, filesize=30.4 K
2024-11-20T13:25:22,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/21def46d941b47b292545e797c350bd4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/21def46d941b47b292545e797c350bd4
2024-11-20T13:25:22,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,473 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/21def46d941b47b292545e797c350bd4, entries=150, sequenceid=201, filesize=11.9 K
2024-11-20T13:25:22,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/290cca15535a4a73bf900c57dd9c0cda as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/290cca15535a4a73bf900c57dd9c0cda
2024-11-20T13:25:22,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,479 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/290cca15535a4a73bf900c57dd9c0cda, entries=150, sequenceid=201, filesize=11.9 K
2024-11-20T13:25:22,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,480 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for 33af58af6776fca1f72685bf60c347d3 in 536ms, sequenceid=201, compaction requested=true
2024-11-20T13:25:22,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 33af58af6776fca1f72685bf60c347d3:
2024-11-20T13:25:22,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.
2024-11-20T13:25:22,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109
2024-11-20T13:25:22,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=109
2024-11-20T13:25:22,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,483 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108
2024-11-20T13:25:22,483 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7760 sec
2024-11-20T13:25:22,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,485 INFO [PEWorker-5 {}]
procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 1.7840 sec 2024-11-20T13:25:22,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 
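The records above show FlushRegionProcedure pid=109 flushing stores B and C of region 33af58af6776fca1f72685bf60c347d3 and its parent FlushTableProcedure pid=108 completing for table TestAcidGuarantees. As a hedged illustration (not part of this log), a flush like this can be requested from a client through the standard HBase Admin API; the sketch assumes a reachable cluster configured via hbase-site.xml, and the class name FlushTableExample is made up.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        // Reads hbase-site.xml from the classpath (assumption: a running cluster).
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Flush all regions of the table. In recent HBase versions the master
          // runs this as a FlushTableProcedure with one FlushRegionProcedure per
          // region, which appears to be what pid=108/pid=109 above record.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }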
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG entry "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" repeated across RpcServer.default.FPBQ.Fifo handlers 0-2 (port 46739) from 2024-11-20T13:25:22,571 through 2024-11-20T13:25:22,631 ...]
2024-11-20T13:25:22,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... several hundred duplicate DEBUG entries of the same "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" message, logged by RpcServer.default.FPBQ.Fifo handlers 0-2 on port 46739 between 2024-11-20T13:25:22,666 and 2024-11-20T13:25:22,742, omitted ...]
2024-11-20T13:25:22,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:25:22,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG entry repeats continuously from RpcServer.default.FPBQ.Fifo handlers 0-2 on port 46739, timestamps 2024-11-20T13:25:22,785 through 2024-11-20T13:25:22,815 ...]
2024-11-20T13:25:22,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108
2024-11-20T13:25:22,815 INFO [Thread-1545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed
2024-11-20T13:25:22,817 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-20T13:25:22,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees
2024-11-20T13:25:22,821 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-20T13:25:22,821 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T13:25:22,822 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T13:25:22,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110
[... interleaved DEBUG entries from storefiletracker.StoreFileTrackerFactory(122) (instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker) continue from RpcServer handlers 0-2 on port 46739, timestamps 2024-11-20T13:25:22,815 through 2024-11-20T13:25:22,828 ...]
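The entries above record a client-requested flush of TestAcidGuarantees being executed by the master as a FlushTableProcedure (pid=110) with a FlushRegionProcedure subprocedure (pid=111), while the client polls MasterRpcServices until the procedure completes. As a hedged illustration only (not part of this log; the class name and connection setup are assumed), a test client could trigger such a flush through the public HBase Admin API roughly as follows:

    // Minimal sketch: trigger a table flush like the one recorded above.
    // Assumes an HBase client configuration reachable from the classpath.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; the master runs it as a
          // flush procedure and the client waits for completion, which corresponds to the
          // "Checking to see if procedure is done" entries in the log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }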
[... the same DEBUG entry (storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker) keeps repeating from RpcServer handlers 0-2 on port 46739, timestamps 2024-11-20T13:25:22,828 through 2024-11-20T13:25:22,849 ...]
2024-11-20T13:25:22,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:22,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,899 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T13:25:22,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:22,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:22,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:22,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:22,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:22,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:22,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,910 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204c81d0448e274871bf2a0a20e30c3ed3_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109122897/Put/seqid=0 2024-11-20T13:25:22,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,914 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,917 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742216_1392 (size=24758) 2024-11-20T13:25:22,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,917 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,922 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204c81d0448e274871bf2a0a20e30c3ed3_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204c81d0448e274871bf2a0a20e30c3ed3_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:22,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,923 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/2c5daed4c41144209f9354dfd923eb59, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:22,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T13:25:22,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/2c5daed4c41144209f9354dfd923eb59 is 175, key is test_row_0/A:col10/1732109122897/Put/seqid=0 2024-11-20T13:25:22,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742215_1391 (size=74390) 2024-11-20T13:25:22,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:25:22,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,931 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=212, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/2c5daed4c41144209f9354dfd923eb59 2024-11-20T13:25:22,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/0c53e3fdcada41038e5710c488782ee1 is 50, key is test_row_0/B:col10/1732109122897/Put/seqid=0 2024-11-20T13:25:22,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,947 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742217_1393 (size=12147) 2024-11-20T13:25:22,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:22,977 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:22,978 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T13:25:22,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:22,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:22,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:22,978 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:22,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:22,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:22,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:22,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109182991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109182996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109182997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109182998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,099 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109183098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109183104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109183104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109183108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T13:25:23,130 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,131 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T13:25:23,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:23,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:23,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:23,131 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:23,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:23,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:23,283 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T13:25:23,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:23,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:23,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:23,284 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:23,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:23,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:23,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109183300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109183308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109183310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109183315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,349 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/0c53e3fdcada41038e5710c488782ee1 2024-11-20T13:25:23,362 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/fddd2fb69bc940ad9478725c16ffa071 is 50, key is test_row_0/C:col10/1732109122897/Put/seqid=0 2024-11-20T13:25:23,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742218_1394 (size=9757) 2024-11-20T13:25:23,374 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/fddd2fb69bc940ad9478725c16ffa071 2024-11-20T13:25:23,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/2c5daed4c41144209f9354dfd923eb59 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/2c5daed4c41144209f9354dfd923eb59 2024-11-20T13:25:23,390 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/2c5daed4c41144209f9354dfd923eb59, entries=400, sequenceid=212, filesize=72.6 K 2024-11-20T13:25:23,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/0c53e3fdcada41038e5710c488782ee1 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/0c53e3fdcada41038e5710c488782ee1 2024-11-20T13:25:23,396 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/0c53e3fdcada41038e5710c488782ee1, entries=150, sequenceid=212, filesize=11.9 K 2024-11-20T13:25:23,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/fddd2fb69bc940ad9478725c16ffa071 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/fddd2fb69bc940ad9478725c16ffa071 2024-11-20T13:25:23,402 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/fddd2fb69bc940ad9478725c16ffa071, entries=100, sequenceid=212, filesize=9.5 K 2024-11-20T13:25:23,403 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 33af58af6776fca1f72685bf60c347d3 in 504ms, sequenceid=212, compaction requested=true 2024-11-20T13:25:23,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:23,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:25:23,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:23,403 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:25:23,403 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:25:23,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:25:23,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:23,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:25:23,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:23,405 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 185331 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:25:23,405 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48942 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:25:23,405 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/A is initiating minor compaction (all files) 2024-11-20T13:25:23,405 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/B is initiating minor compaction (all files) 2024-11-20T13:25:23,405 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/A in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:23,405 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/B in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:23,405 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/930c9c29ebb040db8965724456f00b4c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/35eaab8f05124f479ce573084a0bf208, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/91cd5c868a6c4fae869ed0943e73702d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/2c5daed4c41144209f9354dfd923eb59] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=181.0 K 2024-11-20T13:25:23,405 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/53d804087ccf4b5db41b32bb716177f5, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/43296bc67c944c97aa2d0d97e580985b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/21def46d941b47b292545e797c350bd4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/0c53e3fdcada41038e5710c488782ee1] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=47.8 K 2024-11-20T13:25:23,405 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:23,405 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/930c9c29ebb040db8965724456f00b4c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/35eaab8f05124f479ce573084a0bf208, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/91cd5c868a6c4fae869ed0943e73702d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/2c5daed4c41144209f9354dfd923eb59] 2024-11-20T13:25:23,406 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 53d804087ccf4b5db41b32bb716177f5, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732109119452 2024-11-20T13:25:23,406 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 930c9c29ebb040db8965724456f00b4c, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732109119452 2024-11-20T13:25:23,406 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 43296bc67c944c97aa2d0d97e580985b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732109119840 2024-11-20T13:25:23,406 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35eaab8f05124f479ce573084a0bf208, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732109119840 2024-11-20T13:25:23,407 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 21def46d941b47b292545e797c350bd4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732109120596 2024-11-20T13:25:23,407 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 91cd5c868a6c4fae869ed0943e73702d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732109120596 2024-11-20T13:25:23,407 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c5daed4c41144209f9354dfd923eb59, keycount=400, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732109122822 2024-11-20T13:25:23,407 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c53e3fdcada41038e5710c488782ee1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732109122892 2024-11-20T13:25:23,421 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#B#compaction#333 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:23,421 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/16d5a68878e74d0c9ac71e09cbbff55d is 50, key is test_row_0/B:col10/1732109122897/Put/seqid=0 2024-11-20T13:25:23,424 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:23,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T13:25:23,436 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120a9c5b9bfa3b842ac8ff315e113fa7dbd_33af58af6776fca1f72685bf60c347d3 store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:23,437 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,437 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T13:25:23,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:23,438 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T13:25:23,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:23,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:23,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:23,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:23,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:23,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:23,439 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120a9c5b9bfa3b842ac8ff315e113fa7dbd_33af58af6776fca1f72685bf60c347d3, store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:23,439 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a9c5b9bfa3b842ac8ff315e113fa7dbd_33af58af6776fca1f72685bf60c347d3 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:23,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742219_1395 (size=12629) 2024-11-20T13:25:23,463 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/16d5a68878e74d0c9ac71e09cbbff55d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/16d5a68878e74d0c9ac71e09cbbff55d 2024-11-20T13:25:23,484 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/B of 33af58af6776fca1f72685bf60c347d3 into 16d5a68878e74d0c9ac71e09cbbff55d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:25:23,485 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:23,485 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/B, priority=12, startTime=1732109123403; duration=0sec 2024-11-20T13:25:23,485 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:23,485 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:B 2024-11-20T13:25:23,485 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:25:23,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742220_1396 (size=4469) 2024-11-20T13:25:23,487 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46552 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:25:23,487 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/C is initiating minor compaction (all files) 2024-11-20T13:25:23,488 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/C in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:23,488 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/1e4d4bae1f09472f86505c28b8ec7333, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/d1feda80ac9f41159661169832d00a3e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/290cca15535a4a73bf900c57dd9c0cda, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/fddd2fb69bc940ad9478725c16ffa071] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=45.5 K 2024-11-20T13:25:23,489 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#A#compaction#334 average throughput is 0.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:23,489 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e4d4bae1f09472f86505c28b8ec7333, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732109119452 2024-11-20T13:25:23,489 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting d1feda80ac9f41159661169832d00a3e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732109119840 2024-11-20T13:25:23,489 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 290cca15535a4a73bf900c57dd9c0cda, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732109120596 2024-11-20T13:25:23,490 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting fddd2fb69bc940ad9478725c16ffa071, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732109122892 2024-11-20T13:25:23,491 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/57e83376a183478e865cc2ef2ac66aae is 175, key is test_row_0/A:col10/1732109122897/Put/seqid=0 2024-11-20T13:25:23,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742221_1397 (size=31690) 2024-11-20T13:25:23,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f171bf715c914fb0a7fd73b2f8435188_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109122996/Put/seqid=0 2024-11-20T13:25:23,514 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/57e83376a183478e865cc2ef2ac66aae as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/57e83376a183478e865cc2ef2ac66aae 2024-11-20T13:25:23,519 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/A of 33af58af6776fca1f72685bf60c347d3 into 57e83376a183478e865cc2ef2ac66aae(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:25:23,519 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:23,519 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/A, priority=12, startTime=1732109123403; duration=0sec 2024-11-20T13:25:23,519 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:23,519 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:A 2024-11-20T13:25:23,534 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#C#compaction#336 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:23,535 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/03b07ae937d54153a49759df4195a34a is 50, key is test_row_0/C:col10/1732109122897/Put/seqid=0 2024-11-20T13:25:23,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742222_1398 (size=12304) 2024-11-20T13:25:23,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742223_1399 (size=12629) 2024-11-20T13:25:23,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:23,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:23,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109183623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,629 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109183624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,631 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109183625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,631 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109183627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,670 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-20T13:25:23,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109183729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109183730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109183732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109183732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T13:25:23,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109183935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109183936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109183937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:23,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109183937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:23,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:23,968 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f171bf715c914fb0a7fd73b2f8435188_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f171bf715c914fb0a7fd73b2f8435188_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:23,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/34a76a217d704bfda8eefad5d6232d1d, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:23,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/34a76a217d704bfda8eefad5d6232d1d is 175, key is test_row_0/A:col10/1732109122996/Put/seqid=0 2024-11-20T13:25:23,980 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/03b07ae937d54153a49759df4195a34a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/03b07ae937d54153a49759df4195a34a 2024-11-20T13:25:23,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742224_1400 (size=31105) 2024-11-20T13:25:23,982 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=238, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/34a76a217d704bfda8eefad5d6232d1d 2024-11-20T13:25:23,986 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/C of 33af58af6776fca1f72685bf60c347d3 into 03b07ae937d54153a49759df4195a34a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:25:23,986 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:23,986 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/C, priority=12, startTime=1732109123404; duration=0sec 2024-11-20T13:25:23,987 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:23,987 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:C 2024-11-20T13:25:24,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/5c36a43c0a3c4d459a8a1bbe20d74fd2 is 50, key is test_row_0/B:col10/1732109122996/Put/seqid=0 2024-11-20T13:25:24,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742225_1401 (size=12151) 2024-11-20T13:25:24,015 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/5c36a43c0a3c4d459a8a1bbe20d74fd2 2024-11-20T13:25:24,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/6d970d8d82f84b9b985e6cdafb4da424 is 50, key is test_row_0/C:col10/1732109122996/Put/seqid=0 2024-11-20T13:25:24,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742226_1402 (size=12151) 2024-11-20T13:25:24,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:24,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109184243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:24,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:24,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109184243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:24,247 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:24,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109184243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:24,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:24,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109184245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:24,480 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/6d970d8d82f84b9b985e6cdafb4da424 2024-11-20T13:25:24,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/34a76a217d704bfda8eefad5d6232d1d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/34a76a217d704bfda8eefad5d6232d1d 2024-11-20T13:25:24,490 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/34a76a217d704bfda8eefad5d6232d1d, entries=150, sequenceid=238, filesize=30.4 K 2024-11-20T13:25:24,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/5c36a43c0a3c4d459a8a1bbe20d74fd2 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/5c36a43c0a3c4d459a8a1bbe20d74fd2 2024-11-20T13:25:24,496 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/5c36a43c0a3c4d459a8a1bbe20d74fd2, entries=150, sequenceid=238, filesize=11.9 K 2024-11-20T13:25:24,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/6d970d8d82f84b9b985e6cdafb4da424 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/6d970d8d82f84b9b985e6cdafb4da424 2024-11-20T13:25:24,507 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/6d970d8d82f84b9b985e6cdafb4da424, entries=150, sequenceid=238, filesize=11.9 K 2024-11-20T13:25:24,508 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 33af58af6776fca1f72685bf60c347d3 in 1070ms, sequenceid=238, compaction requested=false 2024-11-20T13:25:24,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:24,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
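The flush that completes here (pid=111, ~154 KB across stores A/B/C) runs as a FlushRegionCallable under a master-side FlushTableProcedure, and the later "Operation: FLUSH, Table Name: default:TestAcidGuarantees" entry corresponds to a client-side admin flush request. A minimal sketch of such a request, assuming a standard client connection (class name hypothetical), could be:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Admin.flush submits a flush procedure on the master and waits for it to finish,
      // which is why the log shows repeated "Checking to see if procedure is done pid=..." calls.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}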
2024-11-20T13:25:24,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-11-20T13:25:24,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-11-20T13:25:24,511 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-20T13:25:24,511 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6880 sec 2024-11-20T13:25:24,513 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.6950 sec 2024-11-20T13:25:24,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:24,751 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T13:25:24,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:24,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:24,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:24,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:24,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:24,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:24,765 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120297fb4da84e1462ebe054fa4c91b9982_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109123623/Put/seqid=0 2024-11-20T13:25:24,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742227_1403 (size=14794) 2024-11-20T13:25:24,769 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:24,773 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120297fb4da84e1462ebe054fa4c91b9982_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120297fb4da84e1462ebe054fa4c91b9982_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:24,774 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/af95badc5d3c423899b3da54bafa4fbd, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:24,774 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/af95badc5d3c423899b3da54bafa4fbd is 175, key is test_row_0/A:col10/1732109123623/Put/seqid=0 2024-11-20T13:25:24,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742228_1404 (size=39749) 2024-11-20T13:25:24,778 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=252, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/af95badc5d3c423899b3da54bafa4fbd 2024-11-20T13:25:24,784 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/30e515420cd54580a5ad1b7165afd935 is 50, key is test_row_0/B:col10/1732109123623/Put/seqid=0 2024-11-20T13:25:24,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742229_1405 (size=12151) 2024-11-20T13:25:24,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:24,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109184822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:24,831 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:24,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109184824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:24,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:24,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109184824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:24,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:24,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109184825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:24,933 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:24,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109184927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:24,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T13:25:24,934 INFO [Thread-1545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-20T13:25:24,935 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:25:24,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-20T13:25:24,936 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:24,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109184932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:24,937 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:25:24,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T13:25:24,938 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:25:24,938 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:25:24,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:24,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109184937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:24,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:24,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109184938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:25,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T13:25:25,090 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:25,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T13:25:25,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:25,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:25,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:25,091 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
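While the region is already flushing, the FlushRegionCallable above fails with "Unable to complete flush", and client mutations keep hitting RegionTooBusyException until the memstore drains. The HBase client normally retries this exception on its own; a stripped-down manual retry loop, with the table and row names taken from this log and the backoff and attempt limit purely assumed, would look roughly like:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempts = 0;
      while (true) {
        try {
          table.put(put);
          break;                          // write accepted once a flush has freed memstore space
        } catch (RegionTooBusyException e) {
          if (++attempts > 10) throw e;   // assumed bound on retries for this sketch
          Thread.sleep(100L * attempts);  // simple linear backoff; real clients use configured pauses
        }
      }
    }
  }
}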
2024-11-20T13:25:25,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:25,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:25,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:25,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109185134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:25,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:25,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109185139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:25,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:25,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109185143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:25,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:25,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109185144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:25,193 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/30e515420cd54580a5ad1b7165afd935 2024-11-20T13:25:25,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/55fcc6538fb046199010426c866f1fa3 is 50, key is test_row_0/C:col10/1732109123623/Put/seqid=0 2024-11-20T13:25:25,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T13:25:25,243 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:25,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T13:25:25,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:25,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:25,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
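For context on the pid=113 retries above: a flush request is executed on the region server by FlushRegionCallable, and while the region is still in the middle of its previous flush the callable fails with the IOException shown, so the master re-dispatches the procedure until it can run. A minimal sketch of triggering such a flush through the public Admin API, assuming the test drives it the same way:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequester {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // The master turns this into a flush procedure and dispatches FlushRegionCallable
          // to the hosting region server, retrying when the region reports it is already flushing.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }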
2024-11-20T13:25:25,245 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:25,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:25,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:25,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742230_1406 (size=12151) 2024-11-20T13:25:25,270 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/55fcc6538fb046199010426c866f1fa3 2024-11-20T13:25:25,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/af95badc5d3c423899b3da54bafa4fbd as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/af95badc5d3c423899b3da54bafa4fbd 2024-11-20T13:25:25,289 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/af95badc5d3c423899b3da54bafa4fbd, entries=200, sequenceid=252, filesize=38.8 K 2024-11-20T13:25:25,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/30e515420cd54580a5ad1b7165afd935 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/30e515420cd54580a5ad1b7165afd935 2024-11-20T13:25:25,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/30e515420cd54580a5ad1b7165afd935, entries=150, sequenceid=252, filesize=11.9 K 2024-11-20T13:25:25,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/55fcc6538fb046199010426c866f1fa3 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/55fcc6538fb046199010426c866f1fa3 2024-11-20T13:25:25,302 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/55fcc6538fb046199010426c866f1fa3, entries=150, sequenceid=252, filesize=11.9 K 2024-11-20T13:25:25,303 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 33af58af6776fca1f72685bf60c347d3 in 552ms, sequenceid=252, compaction requested=true 2024-11-20T13:25:25,303 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:25,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:25:25,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:25,304 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:25,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:25:25,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:25,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:25:25,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T13:25:25,304 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:25,308 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102544 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:25,308 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/A is initiating minor 
compaction (all files) 2024-11-20T13:25:25,308 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/A in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:25,308 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/57e83376a183478e865cc2ef2ac66aae, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/34a76a217d704bfda8eefad5d6232d1d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/af95badc5d3c423899b3da54bafa4fbd] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=100.1 K 2024-11-20T13:25:25,308 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:25,308 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/57e83376a183478e865cc2ef2ac66aae, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/34a76a217d704bfda8eefad5d6232d1d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/af95badc5d3c423899b3da54bafa4fbd] 2024-11-20T13:25:25,308 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:25,308 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/B is initiating minor compaction (all files) 2024-11-20T13:25:25,308 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/B in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
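The compaction selection above ("3 eligible, 16 blocking", ExploringCompactionPolicy picking 3 files, maxThroughput=50.00 MB/second) is controlled by a handful of store-level settings. A hedged sketch of the relevant keys follows; the numeric values are the usual defaults, not values read from this test's configuration, and the throughput bounds assume the default PressureAwareCompactionThroughputController is in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minor compaction picks between min and max adjacent store files; writes are blocked
        // once a store accumulates blockingStoreFiles files (the "16 blocking" figure above).
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        // Compaction I/O is throttled between these bounds depending on flush/storefile pressure;
        // the 50.00 MB/second limit in the log is consistent with the lower bound under low pressure.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        System.out.println("min files per compaction = " + conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }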
2024-11-20T13:25:25,308 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/16d5a68878e74d0c9ac71e09cbbff55d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/5c36a43c0a3c4d459a8a1bbe20d74fd2, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/30e515420cd54580a5ad1b7165afd935] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=36.1 K 2024-11-20T13:25:25,309 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 16d5a68878e74d0c9ac71e09cbbff55d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732109120610 2024-11-20T13:25:25,309 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 57e83376a183478e865cc2ef2ac66aae, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732109120610 2024-11-20T13:25:25,309 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5c36a43c0a3c4d459a8a1bbe20d74fd2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732109122989 2024-11-20T13:25:25,310 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 34a76a217d704bfda8eefad5d6232d1d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732109122989 2024-11-20T13:25:25,310 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30e515420cd54580a5ad1b7165afd935, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732109123623 2024-11-20T13:25:25,310 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting af95badc5d3c423899b3da54bafa4fbd, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732109123623 2024-11-20T13:25:25,319 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:25,328 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#B#compaction#343 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:25,329 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/b2608d06611244448f12b98c45cd9567 is 50, key is test_row_0/B:col10/1732109123623/Put/seqid=0 2024-11-20T13:25:25,345 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411203a32cdc8ca2c4543963b9619c4870d27_33af58af6776fca1f72685bf60c347d3 store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:25,347 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411203a32cdc8ca2c4543963b9619c4870d27_33af58af6776fca1f72685bf60c347d3, store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:25,347 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203a32cdc8ca2c4543963b9619c4870d27_33af58af6776fca1f72685bf60c347d3 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:25,398 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:25,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742232_1408 (size=4469) 2024-11-20T13:25:25,399 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T13:25:25,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
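The MOB writer created and then aborted above is a side effect of column family A being MOB-enabled in this test: cells larger than the family's MOB threshold would be rewritten into separate MOB files during compaction, and the 50-byte test cells never cross it, so the writer ends up empty. A hedged sketch of declaring such a family; the table name "MobDemo" and the 100 KB threshold are illustrative values, not taken from the test.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("MobDemo"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                  .setMobEnabled(true)        // store oversized cells in separate MOB files
                  .setMobThreshold(102400)    // illustrative 100 KB cutoff
                  .build())
              .build());
        }
      }
    }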
2024-11-20T13:25:25,399 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T13:25:25,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:25,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:25,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:25,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:25,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:25,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:25,404 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#A#compaction#342 average throughput is 0.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:25,405 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/f77c56aa8a3a41bb9fde16ef5b047d7e is 175, key is test_row_0/A:col10/1732109123623/Put/seqid=0 2024-11-20T13:25:25,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742231_1407 (size=12731) 2024-11-20T13:25:25,419 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/b2608d06611244448f12b98c45cd9567 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/b2608d06611244448f12b98c45cd9567 2024-11-20T13:25:25,427 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/B of 33af58af6776fca1f72685bf60c347d3 into b2608d06611244448f12b98c45cd9567(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:25:25,427 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:25,427 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/B, priority=13, startTime=1732109125304; duration=0sec 2024-11-20T13:25:25,427 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:25,427 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:B 2024-11-20T13:25:25,427 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:25,431 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:25,431 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/C is initiating minor compaction (all files) 2024-11-20T13:25:25,431 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/C in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:25,431 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/03b07ae937d54153a49759df4195a34a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/6d970d8d82f84b9b985e6cdafb4da424, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/55fcc6538fb046199010426c866f1fa3] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=36.1 K 2024-11-20T13:25:25,432 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03b07ae937d54153a49759df4195a34a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732109120610 2024-11-20T13:25:25,432 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d970d8d82f84b9b985e6cdafb4da424, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732109122989 2024-11-20T13:25:25,433 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55fcc6538fb046199010426c866f1fa3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732109123623 2024-11-20T13:25:25,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34799 is added to blk_1073742233_1409 (size=31685) 2024-11-20T13:25:25,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120462140a3cb4c4469abd0ab77256d5ad0_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109124821/Put/seqid=0 2024-11-20T13:25:25,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:25,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:25,455 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/f77c56aa8a3a41bb9fde16ef5b047d7e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/f77c56aa8a3a41bb9fde16ef5b047d7e 2024-11-20T13:25:25,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742234_1410 (size=12454) 2024-11-20T13:25:25,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:25,466 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/A of 33af58af6776fca1f72685bf60c347d3 into f77c56aa8a3a41bb9fde16ef5b047d7e(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
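On the recurring "Over memstore limit=512.0 K" figure: HRegion.checkResources blocks writes when the region's memstore data size exceeds its blocking size, which is the per-region flush size times hbase.hregion.memstore.block.multiplier. A small sketch of that arithmetic; the 128 KB flush size is an assumption chosen to be consistent with the 512 K limit seen here (the stock default flush size is 128 MB).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        long flushSize = 128L * 1024;  // assumed test-sized hbase.hregion.memstore.flush.size
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;  // writes get RegionTooBusyException past this point
        System.out.println("blocking memstore limit = " + (blockingLimit / 1024.0) + " K");
      }
    }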
2024-11-20T13:25:25,466 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:25,466 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/A, priority=13, startTime=1732109125303; duration=0sec 2024-11-20T13:25:25,466 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:25,466 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:A 2024-11-20T13:25:25,469 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120462140a3cb4c4469abd0ab77256d5ad0_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120462140a3cb4c4469abd0ab77256d5ad0_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:25,470 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#C#compaction#345 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:25,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/04fbf64a15334fdbb25be0835f916c09, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:25,471 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/9a1e803aadd04802bfc3a20a59296f8e is 50, key is test_row_0/C:col10/1732109123623/Put/seqid=0 2024-11-20T13:25:25,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/04fbf64a15334fdbb25be0835f916c09 is 175, key is test_row_0/A:col10/1732109124821/Put/seqid=0 2024-11-20T13:25:25,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:25,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109185465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:25,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:25,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109185465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:25,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:25,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109185467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:25,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:25,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109185469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:25,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742235_1411 (size=31255) 2024-11-20T13:25:25,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742236_1412 (size=12731) 2024-11-20T13:25:25,519 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/9a1e803aadd04802bfc3a20a59296f8e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/9a1e803aadd04802bfc3a20a59296f8e 2024-11-20T13:25:25,526 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/C of 33af58af6776fca1f72685bf60c347d3 into 9a1e803aadd04802bfc3a20a59296f8e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:25:25,526 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:25,526 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/C, priority=13, startTime=1732109125304; duration=0sec 2024-11-20T13:25:25,526 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:25,526 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:C 2024-11-20T13:25:25,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T13:25:25,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:25,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109185576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:25,583 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:25,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109185578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:25,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:25,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109185578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:25,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:25,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109185578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:25,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:25,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109185781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:25,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:25,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109185786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:25,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:25,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109185788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:25,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:25,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109185789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:25,902 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=277, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/04fbf64a15334fdbb25be0835f916c09 2024-11-20T13:25:25,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/62cba26c6c0b47d0b4b5241903ae7544 is 50, key is test_row_0/B:col10/1732109124821/Put/seqid=0 2024-11-20T13:25:25,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742237_1413 (size=12301) 2024-11-20T13:25:25,935 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/62cba26c6c0b47d0b4b5241903ae7544 2024-11-20T13:25:25,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/a1d86e8f6c9f4313953ca018d91f8ccc is 50, key is test_row_0/C:col10/1732109124821/Put/seqid=0 2024-11-20T13:25:25,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742238_1414 (size=12301) 2024-11-20T13:25:25,955 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/a1d86e8f6c9f4313953ca018d91f8ccc 2024-11-20T13:25:25,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/04fbf64a15334fdbb25be0835f916c09 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/04fbf64a15334fdbb25be0835f916c09 2024-11-20T13:25:25,964 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/04fbf64a15334fdbb25be0835f916c09, entries=150, sequenceid=277, filesize=30.5 K 2024-11-20T13:25:25,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/62cba26c6c0b47d0b4b5241903ae7544 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/62cba26c6c0b47d0b4b5241903ae7544 2024-11-20T13:25:25,968 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/62cba26c6c0b47d0b4b5241903ae7544, entries=150, sequenceid=277, filesize=12.0 K 2024-11-20T13:25:25,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/a1d86e8f6c9f4313953ca018d91f8ccc as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/a1d86e8f6c9f4313953ca018d91f8ccc 2024-11-20T13:25:25,972 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/a1d86e8f6c9f4313953ca018d91f8ccc, entries=150, sequenceid=277, filesize=12.0 K 2024-11-20T13:25:25,976 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 33af58af6776fca1f72685bf60c347d3 in 577ms, sequenceid=277, compaction requested=false 2024-11-20T13:25:25,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:25,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:25,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-20T13:25:25,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-20T13:25:25,979 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-20T13:25:25,979 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0400 sec 2024-11-20T13:25:25,981 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.0440 sec 2024-11-20T13:25:26,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T13:25:26,041 INFO [Thread-1545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-20T13:25:26,042 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:25:26,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-20T13:25:26,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T13:25:26,044 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:25:26,045 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:25:26,045 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:25:26,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:26,089 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T13:25:26,090 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:26,090 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:26,090 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:26,090 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-20T13:25:26,090 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:26,090 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:26,097 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120165b235700594ebebbe768e70d498c02_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109125464/Put/seqid=0 2024-11-20T13:25:26,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742239_1415 (size=14994) 2024-11-20T13:25:26,114 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:26,117 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120165b235700594ebebbe768e70d498c02_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120165b235700594ebebbe768e70d498c02_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:26,118 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/485d898139a24b4f92eb937b8141b5b0, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:26,118 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/485d898139a24b4f92eb937b8141b5b0 is 175, key is test_row_0/A:col10/1732109125464/Put/seqid=0 2024-11-20T13:25:26,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742240_1416 (size=39949) 2024-11-20T13:25:26,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T13:25:26,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:26,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109186162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:26,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109186163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:26,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109186169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:26,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109186171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,197 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,197 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T13:25:26,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:26,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:26,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:26,197 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:26,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:26,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:26,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:26,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109186272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:26,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109186272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:26,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109186273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:26,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109186279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T13:25:26,349 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T13:25:26,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:26,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:26,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:26,350 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:26,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:26,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:26,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:26,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109186479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:26,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109186480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:26,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109186480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:26,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109186486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,503 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,503 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T13:25:26,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:26,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:26,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:26,504 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:26,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:26,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:26,527 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=292, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/485d898139a24b4f92eb937b8141b5b0 2024-11-20T13:25:26,541 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/2685a729ab884f6686c556237b1fc920 is 50, key is test_row_0/B:col10/1732109125464/Put/seqid=0 2024-11-20T13:25:26,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742241_1417 (size=12301) 2024-11-20T13:25:26,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T13:25:26,656 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,657 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T13:25:26,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:26,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:26,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:26,657 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:26,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:26,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:26,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:26,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109186784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:26,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109186787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:26,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109186789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:26,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109186791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,809 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,810 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T13:25:26,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:26,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:26,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:26,810 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
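The repeated RegionTooBusyException rejections above are the region refusing new mutates while its memstore is over the blocking limit; the 512.0 K reported here is the blocking memstore size, which HBase derives from the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier (the exact values this test uses are not visible in this log). Purely as an illustration of what the rejected callers were doing, below is a minimal client-side sketch of the same kind of single-row mutate with a back-off; the table, row, family and qualifier names are taken from the log, everything else (value, retry count, sleep times) is assumed.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Same shape of mutate the rejected handlers above were servicing:
      // row "test_row_0", family A, qualifier col10 (the value is made up here).
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // accepted once the flush has drained the memstore
        } catch (IOException e) {
          // The busy-region rejection from the log may arrive directly or, after the
          // client's own retries are exhausted, as the cause of the thrown exception.
          boolean busy = e instanceof RegionTooBusyException
              || e.getCause() instanceof RegionTooBusyException;
          if (!busy || attempt == 5) {
            throw e;
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2; // simple exponential back-off while the region is blocked
        }
      }
    }
  }
}

In practice the HBase client already retries this exception internally, so an explicit loop like the one above only matters once those built-in retries are exhausted.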
2024-11-20T13:25:26,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:26,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:26,958 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/2685a729ab884f6686c556237b1fc920 2024-11-20T13:25:26,963 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:26,963 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T13:25:26,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:26,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:26,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:26,964 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:26,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:26,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
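The pid=115 exchanges above are the master repeatedly dispatching a FlushRegionCallable that the region server declines because the MemStoreFlusher already owns a flush of this region ("NOT flushing ... as already flushing"); each refusal is reported back through reportProcedureDone and surfaces on the master as "Remote procedure failed, pid=115", while pid=114, which keeps being polled with "Checking to see if procedure is done", appears to be the enclosing flush procedure. The log does not show who requested that flush; a hedged sketch of issuing the same kind of table flush through the Admin API is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table. The master runs this as a
      // procedure that dispatches a flush callable to each owning region server, the
      // same mechanism behind the pid=115 FlushRegionCallable retries seen above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}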
2024-11-20T13:25:26,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/980f693ad49248978d044238d54acf85 is 50, key is test_row_0/C:col10/1732109125464/Put/seqid=0 2024-11-20T13:25:26,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742242_1418 (size=12301) 2024-11-20T13:25:26,997 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/980f693ad49248978d044238d54acf85 2024-11-20T13:25:27,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/485d898139a24b4f92eb937b8141b5b0 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/485d898139a24b4f92eb937b8141b5b0 2024-11-20T13:25:27,008 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/485d898139a24b4f92eb937b8141b5b0, entries=200, sequenceid=292, filesize=39.0 K 2024-11-20T13:25:27,010 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/2685a729ab884f6686c556237b1fc920 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/2685a729ab884f6686c556237b1fc920 2024-11-20T13:25:27,014 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/2685a729ab884f6686c556237b1fc920, entries=150, sequenceid=292, filesize=12.0 K 2024-11-20T13:25:27,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/980f693ad49248978d044238d54acf85 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/980f693ad49248978d044238d54acf85 2024-11-20T13:25:27,021 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/980f693ad49248978d044238d54acf85, entries=150, sequenceid=292, filesize=12.0 K 2024-11-20T13:25:27,022 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 33af58af6776fca1f72685bf60c347d3 in 933ms, sequenceid=292, 
compaction requested=true 2024-11-20T13:25:27,022 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:27,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:25:27,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:27,023 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:27,023 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:27,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:25:27,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:27,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:25:27,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:27,025 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:27,025 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/B is initiating minor compaction (all files) 2024-11-20T13:25:27,025 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/B in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
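The "Exploring compaction algorithm has selected 3 files of size 37333" entry reflects the policy's size-ratio test: a candidate set of store files is acceptable only if no single file is larger than the configured ratio (hbase.hstore.compaction.ratio, 1.2 by default) times the combined size of the remaining files, which is why all three B-store files are taken together for a minor compaction. A rough standalone sketch of that check follows, with the three B file sizes approximated from the log; this is an illustration, not the HBase source.

public class FilesInRatioSketch {
  // Approximation of the size-ratio test a candidate permutation must pass
  // (default hbase.hstore.compaction.ratio is 1.2).
  static boolean filesInRatio(long[] sizes, double ratio) {
    long total = 0;
    for (long s : sizes) {
      total += s;
    }
    for (long s : sizes) {
      if (s > ratio * (total - s)) {
        return false; // one file dominates the rest; reject this permutation
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Sizes approximated from the log: b2608d06... ~12.4 K, 62cba26c... ~12.0 K,
    // 2685a729... 12301 bytes, totalling ~37333 bytes.
    long[] bStoreFiles = {12_732, 12_300, 12_301};
    System.out.println(filesInRatio(bStoreFiles, 1.2)); // true -> all three files are compacted together
  }
}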
2024-11-20T13:25:27,025 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/b2608d06611244448f12b98c45cd9567, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/62cba26c6c0b47d0b4b5241903ae7544, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/2685a729ab884f6686c556237b1fc920] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=36.5 K 2024-11-20T13:25:27,026 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102889 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:27,026 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/A is initiating minor compaction (all files) 2024-11-20T13:25:27,026 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/A in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:27,026 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/f77c56aa8a3a41bb9fde16ef5b047d7e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/04fbf64a15334fdbb25be0835f916c09, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/485d898139a24b4f92eb937b8141b5b0] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=100.5 K 2024-11-20T13:25:27,026 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:27,026 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/f77c56aa8a3a41bb9fde16ef5b047d7e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/04fbf64a15334fdbb25be0835f916c09, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/485d898139a24b4f92eb937b8141b5b0] 2024-11-20T13:25:27,026 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting b2608d06611244448f12b98c45cd9567, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732109123623 2024-11-20T13:25:27,026 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting f77c56aa8a3a41bb9fde16ef5b047d7e, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732109123623 2024-11-20T13:25:27,027 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 62cba26c6c0b47d0b4b5241903ae7544, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732109124821 2024-11-20T13:25:27,027 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 04fbf64a15334fdbb25be0835f916c09, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732109124821 2024-11-20T13:25:27,027 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 2685a729ab884f6686c556237b1fc920, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732109125464 2024-11-20T13:25:27,028 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 485d898139a24b4f92eb937b8141b5b0, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732109125459 2024-11-20T13:25:27,042 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#B#compaction#351 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:27,043 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/e102e03d55964e10af2610769af39adc is 50, key is test_row_0/B:col10/1732109125464/Put/seqid=0 2024-11-20T13:25:27,056 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:27,081 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112030ad84183faf4457a525d81e9509f03d_33af58af6776fca1f72685bf60c347d3 store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:27,083 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112030ad84183faf4457a525d81e9509f03d_33af58af6776fca1f72685bf60c347d3, store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:27,083 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112030ad84183faf4457a525d81e9509f03d_33af58af6776fca1f72685bf60c347d3 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:27,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742243_1419 (size=12983) 2024-11-20T13:25:27,096 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/e102e03d55964e10af2610769af39adc as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/e102e03d55964e10af2610769af39adc 2024-11-20T13:25:27,104 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/B of 33af58af6776fca1f72685bf60c347d3 into e102e03d55964e10af2610769af39adc(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
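Store A is flushed and compacted through DefaultMobStoreFlusher/DefaultMobStoreCompactor, and the MOB writer above is aborted only because this selection produced no cells over the MOB threshold, so family A is MOB-enabled in this test while B and C are plain families; the threshold actually configured is not visible in this log. A hedged sketch of declaring such a schema (the 100 KB threshold and the table creation below are illustrative, not taken from the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobSchemaSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      // Family A: MOB enabled, so values above the threshold go to MOB files and
      // flushes/compactions run through the DefaultMobStore* classes seen in the log.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(100 * 1024L) // illustrative threshold, not taken from the log
          .build());
      // Families B and C: ordinary store files with the default flusher/compactor.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
      admin.createTable(table.build());
    }
  }
}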
2024-11-20T13:25:27,104 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:27,104 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/B, priority=13, startTime=1732109127023; duration=0sec 2024-11-20T13:25:27,104 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:27,104 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:B 2024-11-20T13:25:27,104 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:27,106 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:27,106 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/C is initiating minor compaction (all files) 2024-11-20T13:25:27,106 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/C in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:27,107 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/9a1e803aadd04802bfc3a20a59296f8e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/a1d86e8f6c9f4313953ca018d91f8ccc, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/980f693ad49248978d044238d54acf85] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=36.5 K 2024-11-20T13:25:27,109 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a1e803aadd04802bfc3a20a59296f8e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732109123623 2024-11-20T13:25:27,111 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting a1d86e8f6c9f4313953ca018d91f8ccc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732109124821 2024-11-20T13:25:27,111 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 980f693ad49248978d044238d54acf85, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732109125464 2024-11-20T13:25:27,116 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:27,116 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T13:25:27,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:27,117 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T13:25:27,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:27,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:27,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:27,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:27,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:27,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:27,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742244_1420 (size=4469) 2024-11-20T13:25:27,127 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#A#compaction#352 average throughput is 0.34 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:27,127 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/acb035c9513142508779c0032fe51465 is 175, key is test_row_0/A:col10/1732109125464/Put/seqid=0 2024-11-20T13:25:27,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112014ed1f56ce224f139c03b62e88ed5b08_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109126103/Put/seqid=0 2024-11-20T13:25:27,146 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#C#compaction#354 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:27,147 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/4633e84a37ce4dc7bf05d99f128d0b4e is 50, key is test_row_0/C:col10/1732109125464/Put/seqid=0 2024-11-20T13:25:27,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T13:25:27,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742245_1421 (size=31937) 2024-11-20T13:25:27,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742246_1422 (size=12454) 2024-11-20T13:25:27,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,186 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112014ed1f56ce224f139c03b62e88ed5b08_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112014ed1f56ce224f139c03b62e88ed5b08_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:27,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/883c305c3d864c46a7fd6e6006bc04c6, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:27,188 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/883c305c3d864c46a7fd6e6006bc04c6 is 175, key is test_row_0/A:col10/1732109126103/Put/seqid=0 2024-11-20T13:25:27,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742247_1423 (size=12983) 2024-11-20T13:25:27,211 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/4633e84a37ce4dc7bf05d99f128d0b4e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/4633e84a37ce4dc7bf05d99f128d0b4e 2024-11-20T13:25:27,217 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/C of 33af58af6776fca1f72685bf60c347d3 into 4633e84a37ce4dc7bf05d99f128d0b4e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:25:27,217 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:27,217 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/C, priority=13, startTime=1732109127023; duration=0sec 2024-11-20T13:25:27,217 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:27,217 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:C 2024-11-20T13:25:27,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742248_1424 (size=31255) 2024-11-20T13:25:27,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:27,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:27,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:27,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109187310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:27,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:27,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109187312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:27,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:27,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:27,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109187314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:27,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109187315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:27,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:27,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109187422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:27,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:27,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109187423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:27,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:27,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109187423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:27,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:27,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109187423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:27,559 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/acb035c9513142508779c0032fe51465 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/acb035c9513142508779c0032fe51465 2024-11-20T13:25:27,564 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/A of 33af58af6776fca1f72685bf60c347d3 into acb035c9513142508779c0032fe51465(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:25:27,564 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:27,564 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/A, priority=13, startTime=1732109127022; duration=0sec 2024-11-20T13:25:27,564 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:27,564 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:A 2024-11-20T13:25:27,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:27,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109187626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:27,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:27,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109187626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:27,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:27,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109187626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:27,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:27,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109187628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:27,633 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=317, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/883c305c3d864c46a7fd6e6006bc04c6 2024-11-20T13:25:27,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/bc4b7679351b4a8bbae31af158e8f1da is 50, key is test_row_0/B:col10/1732109126103/Put/seqid=0 2024-11-20T13:25:27,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742249_1425 (size=12301) 2024-11-20T13:25:27,655 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/bc4b7679351b4a8bbae31af158e8f1da 2024-11-20T13:25:27,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/0828bc02fee0497984abf68a0de6335f is 50, key is test_row_0/C:col10/1732109126103/Put/seqid=0 2024-11-20T13:25:27,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742250_1426 (size=12301) 2024-11-20T13:25:27,677 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/0828bc02fee0497984abf68a0de6335f 2024-11-20T13:25:27,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/883c305c3d864c46a7fd6e6006bc04c6 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/883c305c3d864c46a7fd6e6006bc04c6 2024-11-20T13:25:27,716 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/883c305c3d864c46a7fd6e6006bc04c6, entries=150, sequenceid=317, filesize=30.5 K 2024-11-20T13:25:27,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/bc4b7679351b4a8bbae31af158e8f1da as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/bc4b7679351b4a8bbae31af158e8f1da 2024-11-20T13:25:27,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,724 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/bc4b7679351b4a8bbae31af158e8f1da, entries=150, sequenceid=317, filesize=12.0 K 2024-11-20T13:25:27,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/0828bc02fee0497984abf68a0de6335f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/0828bc02fee0497984abf68a0de6335f 2024-11-20T13:25:27,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,729 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/0828bc02fee0497984abf68a0de6335f, entries=150, sequenceid=317, filesize=12.0 K 2024-11-20T13:25:27,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,731 INFO 
[RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 33af58af6776fca1f72685bf60c347d3 in 614ms, sequenceid=317, compaction requested=false 2024-11-20T13:25:27,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:27,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:27,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-20T13:25:27,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-20T13:25:27,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,734 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-20T13:25:27,734 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6870 sec 2024-11-20T13:25:27,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,735 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 1.6920 sec 2024-11-20T13:25:27,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[identical DEBUG entries "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" from RpcServer.default.FPBQ.Fifo handlers 0-2 (port=46739), timestamps 2024-11-20T13:25:27,793 through 2024-11-20T13:25:27,860, omitted]
2024-11-20T13:25:27,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,946 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T13:25:27,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:27,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:27,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:27,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:27,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:27,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:27,946 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:27,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,958 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f30bfe5fd57549028b329c007fe74540_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_1/A:col10/1732109127311/Put/seqid=0 2024-11-20T13:25:27,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,958 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742251_1427 (size=12454) 2024-11-20T13:25:27,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:27,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:28,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:28,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109187998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:28,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109188000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:28,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109188001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,006 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:28,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109188004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:28,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109188105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:28,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109188105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:28,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109188107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:28,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109188108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T13:25:28,148 INFO [Thread-1545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-20T13:25:28,150 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:25:28,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-11-20T13:25:28,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T13:25:28,152 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:25:28,153 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:25:28,153 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:25:28,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T13:25:28,305 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-20T13:25:28,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:28,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:28,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:28,306 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:28,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:28,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:28,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:28,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109188309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:28,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109188311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:28,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109188312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,318 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:28,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109188316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,364 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:28,369 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f30bfe5fd57549028b329c007fe74540_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f30bfe5fd57549028b329c007fe74540_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:28,370 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/69ca557bcce145d4b4acd2100f655f8b, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:28,371 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/69ca557bcce145d4b4acd2100f655f8b is 175, key is test_row_1/A:col10/1732109127311/Put/seqid=0 2024-11-20T13:25:28,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742252_1428 (size=31251) 2024-11-20T13:25:28,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T13:25:28,458 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,459 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-20T13:25:28,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:28,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:28,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:28,459 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:28,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:28,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:28,611 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,612 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-20T13:25:28,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:28,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
as already flushing 2024-11-20T13:25:28,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:28,612 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:28,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:28,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:28,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:28,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109188615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:28,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109188615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:28,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109188620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:28,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109188621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,652 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/cef4fb1cbc354a40a074f3530601af57, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/75b649cd5a4a4fa7b9df0cf0abf9a8fd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/545a742a6d8c4d28a6e44499e7ea4a00, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/5a1089b3763f4338a6dfa891f3558db6, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/dc1eed49c7f54cbb83a9e15af9d79bb0, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/dac8e1a0eacc410297a3cd787cad3542, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/d6ca9eebed374760b2129da263831267, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/eb6451f2571849adb224bee5fcbd9c07, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/a37bdc631ca64444a20d24aa01043d9a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/930c9c29ebb040db8965724456f00b4c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/5269cc0374154390a519f1c0375da62d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/35eaab8f05124f479ce573084a0bf208, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/91cd5c868a6c4fae869ed0943e73702d, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/2c5daed4c41144209f9354dfd923eb59, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/57e83376a183478e865cc2ef2ac66aae, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/34a76a217d704bfda8eefad5d6232d1d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/af95badc5d3c423899b3da54bafa4fbd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/f77c56aa8a3a41bb9fde16ef5b047d7e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/04fbf64a15334fdbb25be0835f916c09, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/485d898139a24b4f92eb937b8141b5b0] to archive 2024-11-20T13:25:28,655 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T13:25:28,658 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/cef4fb1cbc354a40a074f3530601af57 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/cef4fb1cbc354a40a074f3530601af57 2024-11-20T13:25:28,668 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/75b649cd5a4a4fa7b9df0cf0abf9a8fd to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/75b649cd5a4a4fa7b9df0cf0abf9a8fd 2024-11-20T13:25:28,671 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/545a742a6d8c4d28a6e44499e7ea4a00 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/545a742a6d8c4d28a6e44499e7ea4a00 2024-11-20T13:25:28,673 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/5a1089b3763f4338a6dfa891f3558db6 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/5a1089b3763f4338a6dfa891f3558db6 2024-11-20T13:25:28,674 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/dc1eed49c7f54cbb83a9e15af9d79bb0 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/dc1eed49c7f54cbb83a9e15af9d79bb0 2024-11-20T13:25:28,676 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/dac8e1a0eacc410297a3cd787cad3542 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/dac8e1a0eacc410297a3cd787cad3542 2024-11-20T13:25:28,677 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/d6ca9eebed374760b2129da263831267 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/d6ca9eebed374760b2129da263831267 2024-11-20T13:25:28,679 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/eb6451f2571849adb224bee5fcbd9c07 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/eb6451f2571849adb224bee5fcbd9c07 2024-11-20T13:25:28,681 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/a37bdc631ca64444a20d24aa01043d9a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/a37bdc631ca64444a20d24aa01043d9a 2024-11-20T13:25:28,682 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/930c9c29ebb040db8965724456f00b4c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/930c9c29ebb040db8965724456f00b4c 2024-11-20T13:25:28,684 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/5269cc0374154390a519f1c0375da62d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/5269cc0374154390a519f1c0375da62d 2024-11-20T13:25:28,685 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/35eaab8f05124f479ce573084a0bf208 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/35eaab8f05124f479ce573084a0bf208 2024-11-20T13:25:28,686 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/91cd5c868a6c4fae869ed0943e73702d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/91cd5c868a6c4fae869ed0943e73702d 2024-11-20T13:25:28,689 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/2c5daed4c41144209f9354dfd923eb59 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/2c5daed4c41144209f9354dfd923eb59 2024-11-20T13:25:28,691 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/57e83376a183478e865cc2ef2ac66aae to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/57e83376a183478e865cc2ef2ac66aae 2024-11-20T13:25:28,693 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/34a76a217d704bfda8eefad5d6232d1d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/34a76a217d704bfda8eefad5d6232d1d 2024-11-20T13:25:28,694 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/af95badc5d3c423899b3da54bafa4fbd to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/af95badc5d3c423899b3da54bafa4fbd 2024-11-20T13:25:28,696 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/f77c56aa8a3a41bb9fde16ef5b047d7e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/f77c56aa8a3a41bb9fde16ef5b047d7e 2024-11-20T13:25:28,697 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/04fbf64a15334fdbb25be0835f916c09 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/04fbf64a15334fdbb25be0835f916c09 2024-11-20T13:25:28,699 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/485d898139a24b4f92eb937b8141b5b0 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/485d898139a24b4f92eb937b8141b5b0 2024-11-20T13:25:28,703 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/0ce0a57b1a0c421cbb350c99287fa3c7, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/64291bd35cd94996828c2a538a45a778, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/a60fef2f99ab459386d1013ad22236d4, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/abdb996d3bb44460824e57cb663bec42, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/e77e02257ad8447193c578a71030eff3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/1607178b1c8840ad99c0b5292e779f98, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/31d6120b5fed4785b32fb70f00668927, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/922556f11c864a0997483d492b854423, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/730cc30a65894f9d991509ccdade2c58, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/53d804087ccf4b5db41b32bb716177f5, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/760b73e37b774dc4b7a4fd2fc827bdc6, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/43296bc67c944c97aa2d0d97e580985b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/21def46d941b47b292545e797c350bd4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/16d5a68878e74d0c9ac71e09cbbff55d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/0c53e3fdcada41038e5710c488782ee1, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/5c36a43c0a3c4d459a8a1bbe20d74fd2, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/b2608d06611244448f12b98c45cd9567, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/30e515420cd54580a5ad1b7165afd935, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/62cba26c6c0b47d0b4b5241903ae7544, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/2685a729ab884f6686c556237b1fc920] to archive 2024-11-20T13:25:28,705 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T13:25:28,707 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/0ce0a57b1a0c421cbb350c99287fa3c7 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/0ce0a57b1a0c421cbb350c99287fa3c7 2024-11-20T13:25:28,709 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/64291bd35cd94996828c2a538a45a778 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/64291bd35cd94996828c2a538a45a778 2024-11-20T13:25:28,710 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/a60fef2f99ab459386d1013ad22236d4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/a60fef2f99ab459386d1013ad22236d4 2024-11-20T13:25:28,711 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/abdb996d3bb44460824e57cb663bec42 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/abdb996d3bb44460824e57cb663bec42 2024-11-20T13:25:28,713 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/e77e02257ad8447193c578a71030eff3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/e77e02257ad8447193c578a71030eff3 2024-11-20T13:25:28,714 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/1607178b1c8840ad99c0b5292e779f98 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/1607178b1c8840ad99c0b5292e779f98 2024-11-20T13:25:28,715 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/31d6120b5fed4785b32fb70f00668927 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/31d6120b5fed4785b32fb70f00668927 2024-11-20T13:25:28,717 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/922556f11c864a0997483d492b854423 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/922556f11c864a0997483d492b854423 2024-11-20T13:25:28,718 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/730cc30a65894f9d991509ccdade2c58 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/730cc30a65894f9d991509ccdade2c58 2024-11-20T13:25:28,719 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/53d804087ccf4b5db41b32bb716177f5 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/53d804087ccf4b5db41b32bb716177f5 2024-11-20T13:25:28,721 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/760b73e37b774dc4b7a4fd2fc827bdc6 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/760b73e37b774dc4b7a4fd2fc827bdc6 2024-11-20T13:25:28,722 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/43296bc67c944c97aa2d0d97e580985b to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/43296bc67c944c97aa2d0d97e580985b 2024-11-20T13:25:28,723 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/21def46d941b47b292545e797c350bd4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/21def46d941b47b292545e797c350bd4 2024-11-20T13:25:28,724 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/16d5a68878e74d0c9ac71e09cbbff55d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/16d5a68878e74d0c9ac71e09cbbff55d 2024-11-20T13:25:28,726 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/0c53e3fdcada41038e5710c488782ee1 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/0c53e3fdcada41038e5710c488782ee1 2024-11-20T13:25:28,727 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/5c36a43c0a3c4d459a8a1bbe20d74fd2 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/5c36a43c0a3c4d459a8a1bbe20d74fd2 2024-11-20T13:25:28,729 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/b2608d06611244448f12b98c45cd9567 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/b2608d06611244448f12b98c45cd9567 2024-11-20T13:25:28,730 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/30e515420cd54580a5ad1b7165afd935 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/30e515420cd54580a5ad1b7165afd935 2024-11-20T13:25:28,732 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/62cba26c6c0b47d0b4b5241903ae7544 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/62cba26c6c0b47d0b4b5241903ae7544 2024-11-20T13:25:28,734 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/2685a729ab884f6686c556237b1fc920 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/2685a729ab884f6686c556237b1fc920 2024-11-20T13:25:28,738 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/f43f5c77728b449bb95c167023781582, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/0f514d7af1e647d0aa79f610d95aaa50, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/df0b3f1ed9214d50911c7d20019c9ab9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/6bbfa61504da43d898a53fdcf77683be, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/ae9689c14a764f1680e96f4d50fed7fc, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/1b07d521b74346c787769d929a487f19, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/c83c4da5db03466f8b39505f44dbd3d2, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/0aeae30597f14641bf6434439f2b4a12, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/8c99d895dac941e4b1662be30f93f075, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/1e4d4bae1f09472f86505c28b8ec7333, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/22ce7e2efad74f549617d2dcc63e6e20, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/d1feda80ac9f41159661169832d00a3e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/290cca15535a4a73bf900c57dd9c0cda, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/03b07ae937d54153a49759df4195a34a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/fddd2fb69bc940ad9478725c16ffa071, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/6d970d8d82f84b9b985e6cdafb4da424, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/9a1e803aadd04802bfc3a20a59296f8e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/55fcc6538fb046199010426c866f1fa3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/a1d86e8f6c9f4313953ca018d91f8ccc, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/980f693ad49248978d044238d54acf85] to archive 2024-11-20T13:25:28,739 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T13:25:28,744 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/f43f5c77728b449bb95c167023781582 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/f43f5c77728b449bb95c167023781582 2024-11-20T13:25:28,745 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/0f514d7af1e647d0aa79f610d95aaa50 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/0f514d7af1e647d0aa79f610d95aaa50 2024-11-20T13:25:28,746 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/df0b3f1ed9214d50911c7d20019c9ab9 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/df0b3f1ed9214d50911c7d20019c9ab9 2024-11-20T13:25:28,748 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/6bbfa61504da43d898a53fdcf77683be to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/6bbfa61504da43d898a53fdcf77683be 2024-11-20T13:25:28,750 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/ae9689c14a764f1680e96f4d50fed7fc to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/ae9689c14a764f1680e96f4d50fed7fc 2024-11-20T13:25:28,752 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/1b07d521b74346c787769d929a487f19 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/1b07d521b74346c787769d929a487f19 2024-11-20T13:25:28,753 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/c83c4da5db03466f8b39505f44dbd3d2 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/c83c4da5db03466f8b39505f44dbd3d2 2024-11-20T13:25:28,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T13:25:28,754 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/0aeae30597f14641bf6434439f2b4a12 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/0aeae30597f14641bf6434439f2b4a12 2024-11-20T13:25:28,757 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/8c99d895dac941e4b1662be30f93f075 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/8c99d895dac941e4b1662be30f93f075 2024-11-20T13:25:28,759 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/1e4d4bae1f09472f86505c28b8ec7333 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/1e4d4bae1f09472f86505c28b8ec7333 2024-11-20T13:25:28,761 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/22ce7e2efad74f549617d2dcc63e6e20 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/22ce7e2efad74f549617d2dcc63e6e20 2024-11-20T13:25:28,763 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/d1feda80ac9f41159661169832d00a3e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/d1feda80ac9f41159661169832d00a3e 2024-11-20T13:25:28,764 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/290cca15535a4a73bf900c57dd9c0cda to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/290cca15535a4a73bf900c57dd9c0cda 2024-11-20T13:25:28,765 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,765 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-20T13:25:28,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:28,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:28,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:28,765 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:28,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:28,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:28,768 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/03b07ae937d54153a49759df4195a34a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/03b07ae937d54153a49759df4195a34a 2024-11-20T13:25:28,771 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/fddd2fb69bc940ad9478725c16ffa071 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/fddd2fb69bc940ad9478725c16ffa071 2024-11-20T13:25:28,773 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/6d970d8d82f84b9b985e6cdafb4da424 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/6d970d8d82f84b9b985e6cdafb4da424 2024-11-20T13:25:28,775 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/9a1e803aadd04802bfc3a20a59296f8e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/9a1e803aadd04802bfc3a20a59296f8e 2024-11-20T13:25:28,777 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/55fcc6538fb046199010426c866f1fa3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/55fcc6538fb046199010426c866f1fa3 2024-11-20T13:25:28,778 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/a1d86e8f6c9f4313953ca018d91f8ccc to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/a1d86e8f6c9f4313953ca018d91f8ccc 2024-11-20T13:25:28,781 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/5ef453f0fbb6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/980f693ad49248978d044238d54acf85 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/980f693ad49248978d044238d54acf85 2024-11-20T13:25:28,796 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=332, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/69ca557bcce145d4b4acd2100f655f8b 2024-11-20T13:25:28,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/2e3a90f6b49d49eb851a78e8cc7221c8 is 50, key is test_row_1/B:col10/1732109127311/Put/seqid=0 2024-11-20T13:25:28,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742253_1429 (size=9857) 2024-11-20T13:25:28,834 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/2e3a90f6b49d49eb851a78e8cc7221c8 2024-11-20T13:25:28,859 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/91c3d3564def469d8aeacef9e9434905 is 50, key is test_row_1/C:col10/1732109127311/Put/seqid=0 2024-11-20T13:25:28,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742254_1430 (size=9857) 2024-11-20T13:25:28,883 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/91c3d3564def469d8aeacef9e9434905 2024-11-20T13:25:28,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/69ca557bcce145d4b4acd2100f655f8b as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/69ca557bcce145d4b4acd2100f655f8b 2024-11-20T13:25:28,899 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/69ca557bcce145d4b4acd2100f655f8b, entries=150, sequenceid=332, filesize=30.5 K 2024-11-20T13:25:28,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/2e3a90f6b49d49eb851a78e8cc7221c8 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/2e3a90f6b49d49eb851a78e8cc7221c8 2024-11-20T13:25:28,906 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/2e3a90f6b49d49eb851a78e8cc7221c8, entries=100, sequenceid=332, filesize=9.6 K 2024-11-20T13:25:28,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/91c3d3564def469d8aeacef9e9434905 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/91c3d3564def469d8aeacef9e9434905 2024-11-20T13:25:28,911 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/91c3d3564def469d8aeacef9e9434905, entries=100, sequenceid=332, filesize=9.6 K 2024-11-20T13:25:28,911 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 33af58af6776fca1f72685bf60c347d3 in 965ms, sequenceid=332, compaction requested=true 2024-11-20T13:25:28,911 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush 
status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:28,912 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:28,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:25:28,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:28,913 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:28,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:25:28,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:28,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:25:28,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:28,914 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:28,914 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/A is initiating minor compaction (all files) 2024-11-20T13:25:28,914 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/A in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:28,914 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/acb035c9513142508779c0032fe51465, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/883c305c3d864c46a7fd6e6006bc04c6, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/69ca557bcce145d4b4acd2100f655f8b] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=92.2 K 2024-11-20T13:25:28,914 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:28,914 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/acb035c9513142508779c0032fe51465, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/883c305c3d864c46a7fd6e6006bc04c6, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/69ca557bcce145d4b4acd2100f655f8b] 2024-11-20T13:25:28,915 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting acb035c9513142508779c0032fe51465, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732109125464 2024-11-20T13:25:28,915 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:28,915 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/B is initiating minor compaction (all files) 2024-11-20T13:25:28,915 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/B in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:28,915 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/e102e03d55964e10af2610769af39adc, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/bc4b7679351b4a8bbae31af158e8f1da, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/2e3a90f6b49d49eb851a78e8cc7221c8] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=34.3 K 2024-11-20T13:25:28,915 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 883c305c3d864c46a7fd6e6006bc04c6, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732109126103 2024-11-20T13:25:28,916 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting e102e03d55964e10af2610769af39adc, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732109125464 2024-11-20T13:25:28,916 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69ca557bcce145d4b4acd2100f655f8b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732109127306 2024-11-20T13:25:28,917 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting bc4b7679351b4a8bbae31af158e8f1da, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732109126103 2024-11-20T13:25:28,917 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e3a90f6b49d49eb851a78e8cc7221c8, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732109127311 2024-11-20T13:25:28,923 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:28,923 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-20T13:25:28,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:28,923 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T13:25:28,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:28,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:28,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:28,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:28,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:28,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:28,939 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#B#compaction#360 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:28,941 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:28,941 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/7ab4245036f74b7e8c04390ffe0ad4ae is 50, key is test_row_0/B:col10/1732109126103/Put/seqid=0 2024-11-20T13:25:28,949 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120f36278bfa3f248a69c4bc8bb280c6f7f_33af58af6776fca1f72685bf60c347d3 store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:28,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209abe4e14c29a4108ae256abfe51fe278_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109128002/Put/seqid=0 2024-11-20T13:25:28,950 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120f36278bfa3f248a69c4bc8bb280c6f7f_33af58af6776fca1f72685bf60c347d3, store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:28,952 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f36278bfa3f248a69c4bc8bb280c6f7f_33af58af6776fca1f72685bf60c347d3 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:28,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742255_1431 (size=12404) 2024-11-20T13:25:28,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742257_1433 (size=4469) 2024-11-20T13:25:28,966 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/7ab4245036f74b7e8c04390ffe0ad4ae as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/7ab4245036f74b7e8c04390ffe0ad4ae 2024-11-20T13:25:28,966 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#A#compaction#361 average throughput is 0.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:28,967 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/9948c70148474d38a20746fc53bb4bce is 175, key is test_row_0/A:col10/1732109126103/Put/seqid=0 2024-11-20T13:25:28,979 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/B of 33af58af6776fca1f72685bf60c347d3 into 7ab4245036f74b7e8c04390ffe0ad4ae(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:25:28,979 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:28,979 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/B, priority=13, startTime=1732109128913; duration=0sec 2024-11-20T13:25:28,979 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:28,979 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:B 2024-11-20T13:25:28,979 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:28,982 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:28,982 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/C is initiating minor compaction (all files) 2024-11-20T13:25:28,982 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/C in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:28,983 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/4633e84a37ce4dc7bf05d99f128d0b4e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/0828bc02fee0497984abf68a0de6335f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/91c3d3564def469d8aeacef9e9434905] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=34.3 K 2024-11-20T13:25:28,984 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 4633e84a37ce4dc7bf05d99f128d0b4e, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732109125464 2024-11-20T13:25:28,996 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 0828bc02fee0497984abf68a0de6335f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732109126103 2024-11-20T13:25:28,997 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 91c3d3564def469d8aeacef9e9434905, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732109127311 2024-11-20T13:25:29,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742256_1432 (size=12454) 2024-11-20T13:25:29,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:29,009 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209abe4e14c29a4108ae256abfe51fe278_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209abe4e14c29a4108ae256abfe51fe278_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:29,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/8901750f1ad840188ac091fe2fa23822, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:29,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/8901750f1ad840188ac091fe2fa23822 is 175, key is test_row_0/A:col10/1732109128002/Put/seqid=0 2024-11-20T13:25:29,012 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#C#compaction#363 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:29,013 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/c359124a5ca44fdcbe375a9fc887bd86 is 50, key is test_row_0/C:col10/1732109126103/Put/seqid=0 2024-11-20T13:25:29,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742258_1434 (size=31465) 2024-11-20T13:25:29,028 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/9948c70148474d38a20746fc53bb4bce as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/9948c70148474d38a20746fc53bb4bce 2024-11-20T13:25:29,034 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/A of 33af58af6776fca1f72685bf60c347d3 into 9948c70148474d38a20746fc53bb4bce(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:25:29,034 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:29,034 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/A, priority=13, startTime=1732109128912; duration=0sec 2024-11-20T13:25:29,034 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:29,034 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:A 2024-11-20T13:25:29,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742260_1436 (size=12404) 2024-11-20T13:25:29,068 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/c359124a5ca44fdcbe375a9fc887bd86 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/c359124a5ca44fdcbe375a9fc887bd86 2024-11-20T13:25:29,075 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/C of 33af58af6776fca1f72685bf60c347d3 into c359124a5ca44fdcbe375a9fc887bd86(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:25:29,075 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:29,075 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/C, priority=13, startTime=1732109128913; duration=0sec 2024-11-20T13:25:29,075 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:29,075 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:C 2024-11-20T13:25:29,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742259_1435 (size=31255) 2024-11-20T13:25:29,078 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=355, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/8901750f1ad840188ac091fe2fa23822 2024-11-20T13:25:29,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/264d11662b9d4555ad9c953277983978 is 50, key is test_row_0/B:col10/1732109128002/Put/seqid=0 2024-11-20T13:25:29,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742261_1437 (size=12301) 2024-11-20T13:25:29,116 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/264d11662b9d4555ad9c953277983978 2024-11-20T13:25:29,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:29,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:29,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/62362fae03af4a579e077ab8e080ee39 is 50, key is test_row_0/C:col10/1732109128002/Put/seqid=0 2024-11-20T13:25:29,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:29,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109189140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:29,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:29,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:29,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109189144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:29,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109189143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:29,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:29,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109189144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:29,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742262_1438 (size=12301) 2024-11-20T13:25:29,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:29,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109189246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:29,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T13:25:29,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:29,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109189251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:29,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:29,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109189251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:29,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:29,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109189252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:29,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:29,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109189451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:29,464 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:29,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109189458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:29,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:29,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109189459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:29,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:29,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109189459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:29,558 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/62362fae03af4a579e077ab8e080ee39 2024-11-20T13:25:29,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/8901750f1ad840188ac091fe2fa23822 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/8901750f1ad840188ac091fe2fa23822 2024-11-20T13:25:29,567 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/8901750f1ad840188ac091fe2fa23822, entries=150, sequenceid=355, filesize=30.5 K 2024-11-20T13:25:29,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/264d11662b9d4555ad9c953277983978 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/264d11662b9d4555ad9c953277983978 2024-11-20T13:25:29,572 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/264d11662b9d4555ad9c953277983978, entries=150, sequenceid=355, filesize=12.0 K 2024-11-20T13:25:29,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/62362fae03af4a579e077ab8e080ee39 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/62362fae03af4a579e077ab8e080ee39 2024-11-20T13:25:29,578 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/62362fae03af4a579e077ab8e080ee39, entries=150, sequenceid=355, filesize=12.0 K 2024-11-20T13:25:29,583 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 33af58af6776fca1f72685bf60c347d3 in 660ms, sequenceid=355, compaction requested=false 2024-11-20T13:25:29,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:29,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:29,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-11-20T13:25:29,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-11-20T13:25:29,585 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-20T13:25:29,585 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4310 sec 2024-11-20T13:25:29,587 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.4360 sec 2024-11-20T13:25:29,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:29,764 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T13:25:29,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:29,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:29,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:29,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:29,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:29,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:29,771 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203d474c83899248f587f924baaa7528b3_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109129142/Put/seqid=0 2024-11-20T13:25:29,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742263_1439 (size=14994) 2024-11-20T13:25:29,782 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:29,788 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203d474c83899248f587f924baaa7528b3_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203d474c83899248f587f924baaa7528b3_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:29,790 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/45aea7714c204f2cb4b4d3a990af7bda, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:29,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/45aea7714c204f2cb4b4d3a990af7bda is 175, key is test_row_0/A:col10/1732109129142/Put/seqid=0 2024-11-20T13:25:29,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742264_1440 (size=39949) 2024-11-20T13:25:29,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:29,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109189808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:29,819 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:29,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109189812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:29,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:29,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109189814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:29,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:29,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109189814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:29,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:29,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109189916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:29,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:29,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109189921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:29,933 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:29,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109189927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:29,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:29,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109189928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109190119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109190127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109190136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109190136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,199 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=372, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/45aea7714c204f2cb4b4d3a990af7bda 2024-11-20T13:25:30,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/6a77edf11ee243309695096e262f33a2 is 50, key is test_row_0/B:col10/1732109129142/Put/seqid=0 2024-11-20T13:25:30,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742265_1441 (size=12301) 2024-11-20T13:25:30,241 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/6a77edf11ee243309695096e262f33a2 2024-11-20T13:25:30,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T13:25:30,259 INFO [Thread-1545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-20T13:25:30,260 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:25:30,261 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/24887886b67947acbc98f81fc0a801dd is 50, key is test_row_0/C:col10/1732109129142/Put/seqid=0 2024-11-20T13:25:30,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-11-20T13:25:30,263 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:25:30,265 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:25:30,265 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:25:30,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T13:25:30,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742266_1442 (size=12301) 2024-11-20T13:25:30,283 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/24887886b67947acbc98f81fc0a801dd 2024-11-20T13:25:30,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/45aea7714c204f2cb4b4d3a990af7bda as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/45aea7714c204f2cb4b4d3a990af7bda 2024-11-20T13:25:30,301 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/45aea7714c204f2cb4b4d3a990af7bda, entries=200, sequenceid=372, filesize=39.0 K 2024-11-20T13:25:30,303 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/6a77edf11ee243309695096e262f33a2 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/6a77edf11ee243309695096e262f33a2 2024-11-20T13:25:30,309 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/6a77edf11ee243309695096e262f33a2, entries=150, sequenceid=372, filesize=12.0 K 2024-11-20T13:25:30,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/24887886b67947acbc98f81fc0a801dd as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/24887886b67947acbc98f81fc0a801dd 2024-11-20T13:25:30,314 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/24887886b67947acbc98f81fc0a801dd, entries=150, sequenceid=372, filesize=12.0 K 2024-11-20T13:25:30,315 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 33af58af6776fca1f72685bf60c347d3 in 551ms, sequenceid=372, compaction requested=true 2024-11-20T13:25:30,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:30,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:25:30,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:30,315 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:30,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:25:30,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:30,315 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:30,316 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102669 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:30,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:25:30,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:30,316 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/A is initiating minor compaction (all files) 2024-11-20T13:25:30,316 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37006 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:30,316 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/A in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
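
The flush recorded above (Client=jenkins//172.17.0.2 flush TestAcidGuarantees, procId 118/119) is an ordinary table flush requested through the HBase Admin API. A minimal sketch of issuing such a request from client code, assuming a reachable cluster and reusing the table name from this log (connection settings are placeholders, not taken from the test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Asks the master to flush every region of the table; on the server this
                // surfaces as the FlushTableProcedure / FlushRegionProcedure pair seen above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }
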
2024-11-20T13:25:30,316 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/B is initiating minor compaction (all files) 2024-11-20T13:25:30,316 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/9948c70148474d38a20746fc53bb4bce, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/8901750f1ad840188ac091fe2fa23822, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/45aea7714c204f2cb4b4d3a990af7bda] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=100.3 K 2024-11-20T13:25:30,316 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/B in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:30,316 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:30,316 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/9948c70148474d38a20746fc53bb4bce, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/8901750f1ad840188ac091fe2fa23822, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/45aea7714c204f2cb4b4d3a990af7bda] 2024-11-20T13:25:30,316 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/7ab4245036f74b7e8c04390ffe0ad4ae, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/264d11662b9d4555ad9c953277983978, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/6a77edf11ee243309695096e262f33a2] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=36.1 K 2024-11-20T13:25:30,317 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9948c70148474d38a20746fc53bb4bce, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732109126103 2024-11-20T13:25:30,317 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ab4245036f74b7e8c04390ffe0ad4ae, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732109126103 2024-11-20T13:25:30,317 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8901750f1ad840188ac091fe2fa23822, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1732109127995 2024-11-20T13:25:30,318 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45aea7714c204f2cb4b4d3a990af7bda, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732109129141 2024-11-20T13:25:30,318 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 264d11662b9d4555ad9c953277983978, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1732109127995 2024-11-20T13:25:30,318 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a77edf11ee243309695096e262f33a2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732109129141 2024-11-20T13:25:30,332 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:30,349 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#B#compaction#370 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:30,350 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/6931f809d4054881b60ce92860c6316f is 50, key is test_row_0/B:col10/1732109129142/Put/seqid=0 2024-11-20T13:25:30,361 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120506b965d609d42019a99d50e59071717_33af58af6776fca1f72685bf60c347d3 store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:30,363 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120506b965d609d42019a99d50e59071717_33af58af6776fca1f72685bf60c347d3, store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:30,363 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120506b965d609d42019a99d50e59071717_33af58af6776fca1f72685bf60c347d3 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:30,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T13:25:30,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742267_1443 (size=4469) 2024-11-20T13:25:30,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742268_1444 (size=12507) 2024-11-20T13:25:30,416 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,416 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T13:25:30,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
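
The mob.DefaultMobStoreFlusher and DefaultMobStoreCompactor entries in this run exist because column family A of TestAcidGuarantees is MOB-enabled. For reference, a family is marked MOB-enabled when the table is created, roughly as sketched below; the table and family names mirror the log, while the threshold value is purely illustrative and not read from the test source:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableExample {
        public static void createTable(Admin admin) throws java.io.IOException {
            ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)       // values above the threshold are written as separate MOB files
                .setMobThreshold(4 * 1024) // illustrative threshold in bytes
                .build();
            TableDescriptor table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setColumnFamily(mobFamily)
                .build();
            admin.createTable(table);
        }
    }
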
2024-11-20T13:25:30,417 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T13:25:30,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:30,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:30,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:30,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:30,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:30,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:30,426 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/6931f809d4054881b60ce92860c6316f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/6931f809d4054881b60ce92860c6316f 2024-11-20T13:25:30,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:30,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:30,433 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/B of 33af58af6776fca1f72685bf60c347d3 into 6931f809d4054881b60ce92860c6316f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
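
The "Over memstore limit=512.0 K" rejections that recur throughout this run come from HRegion.checkResources blocking writes once a region's memstore exceeds the flush size multiplied by the blocking multiplier. A hedged sketch of setting those two knobs programmatically; the 128 K flush size is an assumption chosen only so that 128 K x 4 matches the 512 K limit in the log, not a value taken from the test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfigExample {
        public static Configuration smallMemstoreConf() {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it reaches this many bytes.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            // Reject writes with RegionTooBusyException once the memstore reaches
            // flush.size * block.multiplier (512 K with the values assumed here).
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }
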
2024-11-20T13:25:30,433 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:30,433 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/B, priority=13, startTime=1732109130315; duration=0sec 2024-11-20T13:25:30,433 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:30,433 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:B 2024-11-20T13:25:30,433 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:30,435 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37006 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:30,435 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/C is initiating minor compaction (all files) 2024-11-20T13:25:30,435 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/C in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:30,435 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/c359124a5ca44fdcbe375a9fc887bd86, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/62362fae03af4a579e077ab8e080ee39, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/24887886b67947acbc98f81fc0a801dd] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=36.1 K 2024-11-20T13:25:30,436 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting c359124a5ca44fdcbe375a9fc887bd86, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732109126103 2024-11-20T13:25:30,436 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 62362fae03af4a579e077ab8e080ee39, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1732109127995 2024-11-20T13:25:30,436 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 24887886b67947acbc98f81fc0a801dd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732109129141 2024-11-20T13:25:30,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e252e30b67d345119224d69e2121a06c_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109129797/Put/seqid=0 2024-11-20T13:25:30,446 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#C#compaction#372 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:30,446 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/2ca72a560cfd43c1a54c001ad6ac28fe is 50, key is test_row_0/C:col10/1732109129142/Put/seqid=0 2024-11-20T13:25:30,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742269_1445 (size=12454) 2024-11-20T13:25:30,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:30,467 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e252e30b67d345119224d69e2121a06c_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e252e30b67d345119224d69e2121a06c_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:30,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/cb13b3be4e5749a38f9f19ba1d8f891e, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:30,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/cb13b3be4e5749a38f9f19ba1d8f891e is 175, key is test_row_0/A:col10/1732109129797/Put/seqid=0 2024-11-20T13:25:30,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109190466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109190468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109190468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109190469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742270_1446 (size=12507) 2024-11-20T13:25:30,496 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/2ca72a560cfd43c1a54c001ad6ac28fe as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/2ca72a560cfd43c1a54c001ad6ac28fe 2024-11-20T13:25:30,503 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/C of 33af58af6776fca1f72685bf60c347d3 into 2ca72a560cfd43c1a54c001ad6ac28fe(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:25:30,503 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:30,503 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/C, priority=13, startTime=1732109130316; duration=0sec 2024-11-20T13:25:30,503 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:30,503 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:C 2024-11-20T13:25:30,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742271_1447 (size=31255) 2024-11-20T13:25:30,532 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=395, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/cb13b3be4e5749a38f9f19ba1d8f891e 2024-11-20T13:25:30,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/70257b3e6bdb4533aa418973a65724b9 is 50, key is test_row_0/B:col10/1732109129797/Put/seqid=0 2024-11-20T13:25:30,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T13:25:30,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742272_1448 (size=12301) 2024-11-20T13:25:30,577 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/70257b3e6bdb4533aa418973a65724b9 2024-11-20T13:25:30,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/20d78a2e0a6a4e098767b0bab2071d40 is 50, key is test_row_0/C:col10/1732109129797/Put/seqid=0 2024-11-20T13:25:30,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109190578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109190582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109190583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109190588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742273_1449 (size=12301) 2024-11-20T13:25:30,625 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/20d78a2e0a6a4e098767b0bab2071d40 2024-11-20T13:25:30,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/cb13b3be4e5749a38f9f19ba1d8f891e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/cb13b3be4e5749a38f9f19ba1d8f891e 2024-11-20T13:25:30,637 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/cb13b3be4e5749a38f9f19ba1d8f891e, entries=150, sequenceid=395, filesize=30.5 K 2024-11-20T13:25:30,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/70257b3e6bdb4533aa418973a65724b9 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/70257b3e6bdb4533aa418973a65724b9 2024-11-20T13:25:30,642 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/70257b3e6bdb4533aa418973a65724b9, entries=150, sequenceid=395, filesize=12.0 K 2024-11-20T13:25:30,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 
{event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/20d78a2e0a6a4e098767b0bab2071d40 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/20d78a2e0a6a4e098767b0bab2071d40 2024-11-20T13:25:30,651 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/20d78a2e0a6a4e098767b0bab2071d40, entries=150, sequenceid=395, filesize=12.0 K 2024-11-20T13:25:30,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.StoreScanner(992): StoreScanner already has the close lock. There is no need to updateReaders 2024-11-20T13:25:30,652 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 33af58af6776fca1f72685bf60c347d3 in 235ms, sequenceid=395, compaction requested=false 2024-11-20T13:25:30,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:30,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
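
The RegionTooBusyException stack traces above are thrown back to writers while the region sits over its blocking memstore limit; once the flush completes (as it does here at sequenceid=395) writes go through again. The stock HBase client retries such failures on its own, but a caller that wants explicit control could back off manually, roughly as below. This sketch assumes client-side retries are configured low enough that the server's exception surfaces directly; the row, family and qualifier mirror the log, and the retry policy is illustrative:

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutExample {
        // Retries a single Put a few times while the region reports it is too busy.
        public static void putWithBackoff(Table table) throws Exception {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    return; // accepted once the memstore has drained
                } catch (RegionTooBusyException e) {
                    Thread.sleep(backoffMs); // give flushes/compactions time to free memstore space
                    backoffMs *= 2;
                }
            }
            throw new java.io.IOException("region still too busy after retries");
        }
    }
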
2024-11-20T13:25:30,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-11-20T13:25:30,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-11-20T13:25:30,655 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-20T13:25:30,655 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 389 msec 2024-11-20T13:25:30,657 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 396 msec 2024-11-20T13:25:30,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:30,800 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T13:25:30,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:30,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:30,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:30,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:30,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:30,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:30,804 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#A#compaction#369 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:30,805 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/d201e4b011634afa9ec2b5d3af94a539 is 175, key is test_row_0/A:col10/1732109129142/Put/seqid=0 2024-11-20T13:25:30,819 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206c7d2f1ba5414fac9c8476580b170d28_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109130800/Put/seqid=0 2024-11-20T13:25:30,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742274_1450 (size=31461) 2024-11-20T13:25:30,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742275_1451 (size=12454) 2024-11-20T13:25:30,833 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/d201e4b011634afa9ec2b5d3af94a539 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/d201e4b011634afa9ec2b5d3af94a539 2024-11-20T13:25:30,833 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:30,837 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206c7d2f1ba5414fac9c8476580b170d28_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206c7d2f1ba5414fac9c8476580b170d28_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:30,838 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/A of 33af58af6776fca1f72685bf60c347d3 into d201e4b011634afa9ec2b5d3af94a539(size=30.7 K), total size for store is 61.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:25:30,839 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:30,839 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/A, priority=13, startTime=1732109130315; duration=0sec 2024-11-20T13:25:30,839 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:30,839 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:A 2024-11-20T13:25:30,839 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/13e2ff12d0ac4c4aacf717f45e2b1fa5, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:30,840 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/13e2ff12d0ac4c4aacf717f45e2b1fa5 is 175, key is test_row_0/A:col10/1732109130800/Put/seqid=0 2024-11-20T13:25:30,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742276_1452 (size=31255) 2024-11-20T13:25:30,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109190840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109190845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109190846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109190850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T13:25:30,870 INFO [Thread-1545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-20T13:25:30,871 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:25:30,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-11-20T13:25:30,873 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:25:30,873 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:25:30,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T13:25:30,873 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:25:30,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53046 deadline: 1732109190891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,897 DEBUG [Thread-1539 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18411 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:25:30,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109190950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109190955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109190955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:30,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109190955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:30,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T13:25:31,027 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:31,028 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T13:25:31,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:31,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:31,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:31,028 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:31,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:31,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:31,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:31,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109191163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:31,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:31,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109191163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:31,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:31,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109191165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:31,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:31,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109191166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:31,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T13:25:31,180 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:31,182 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T13:25:31,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:31,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:31,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:31,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:31,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:31,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:31,245 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=413, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/13e2ff12d0ac4c4aacf717f45e2b1fa5 2024-11-20T13:25:31,265 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/72daafba53724067abe51704ff5a6f3e is 50, key is test_row_0/B:col10/1732109130800/Put/seqid=0 2024-11-20T13:25:31,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742277_1453 (size=12301) 2024-11-20T13:25:31,337 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:31,337 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T13:25:31,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:31,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:31,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:31,338 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:31,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:31,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:31,471 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:31,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109191468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:31,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:31,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109191469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:31,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:31,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109191469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:31,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T13:25:31,480 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:31,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109191475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:31,490 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:31,491 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T13:25:31,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:31,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:31,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:31,491 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:31,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:31,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:31,644 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:31,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T13:25:31,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:31,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. as already flushing 2024-11-20T13:25:31,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:31,645 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:31,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:31,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:31,679 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/72daafba53724067abe51704ff5a6f3e 2024-11-20T13:25:31,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/e3d319d6b15d422d93f68d3d365d1f33 is 50, key is test_row_0/C:col10/1732109130800/Put/seqid=0 2024-11-20T13:25:31,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742278_1454 (size=12301) 2024-11-20T13:25:31,705 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/e3d319d6b15d422d93f68d3d365d1f33 2024-11-20T13:25:31,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/13e2ff12d0ac4c4aacf717f45e2b1fa5 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/13e2ff12d0ac4c4aacf717f45e2b1fa5 2024-11-20T13:25:31,723 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/13e2ff12d0ac4c4aacf717f45e2b1fa5, entries=150, sequenceid=413, filesize=30.5 K 2024-11-20T13:25:31,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/72daafba53724067abe51704ff5a6f3e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/72daafba53724067abe51704ff5a6f3e 2024-11-20T13:25:31,728 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/72daafba53724067abe51704ff5a6f3e, entries=150, sequenceid=413, filesize=12.0 K 2024-11-20T13:25:31,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/e3d319d6b15d422d93f68d3d365d1f33 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/e3d319d6b15d422d93f68d3d365d1f33 2024-11-20T13:25:31,737 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/e3d319d6b15d422d93f68d3d365d1f33, entries=150, sequenceid=413, filesize=12.0 K 2024-11-20T13:25:31,739 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 33af58af6776fca1f72685bf60c347d3 in 939ms, sequenceid=413, compaction requested=true 2024-11-20T13:25:31,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:31,739 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:31,740 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93971 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:31,740 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/A is initiating minor compaction (all files) 2024-11-20T13:25:31,740 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/A in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:31,740 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/d201e4b011634afa9ec2b5d3af94a539, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/cb13b3be4e5749a38f9f19ba1d8f891e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/13e2ff12d0ac4c4aacf717f45e2b1fa5] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=91.8 K 2024-11-20T13:25:31,740 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:31,740 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/d201e4b011634afa9ec2b5d3af94a539, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/cb13b3be4e5749a38f9f19ba1d8f891e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/13e2ff12d0ac4c4aacf717f45e2b1fa5] 2024-11-20T13:25:31,741 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting d201e4b011634afa9ec2b5d3af94a539, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732109129141 2024-11-20T13:25:31,741 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb13b3be4e5749a38f9f19ba1d8f891e, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732109129797 2024-11-20T13:25:31,742 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13e2ff12d0ac4c4aacf717f45e2b1fa5, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732109130453 2024-11-20T13:25:31,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:25:31,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:31,748 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:31,749 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37109 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:31,749 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/B is initiating minor compaction (all files) 2024-11-20T13:25:31,749 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/B in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:31,749 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/6931f809d4054881b60ce92860c6316f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/70257b3e6bdb4533aa418973a65724b9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/72daafba53724067abe51704ff5a6f3e] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=36.2 K 2024-11-20T13:25:31,750 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 6931f809d4054881b60ce92860c6316f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732109129141 2024-11-20T13:25:31,750 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 70257b3e6bdb4533aa418973a65724b9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732109129797 2024-11-20T13:25:31,750 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 72daafba53724067abe51704ff5a6f3e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732109130453 2024-11-20T13:25:31,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:25:31,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:31,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:25:31,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:31,760 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:31,766 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#B#compaction#379 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:31,766 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/e9d747ab617543bfbb7aa2b8e8a0a791 is 50, key is test_row_0/B:col10/1732109130800/Put/seqid=0 2024-11-20T13:25:31,772 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112099624d91e6524ff19c423272c31c093a_33af58af6776fca1f72685bf60c347d3 store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:31,774 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112099624d91e6524ff19c423272c31c093a_33af58af6776fca1f72685bf60c347d3, store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:31,774 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112099624d91e6524ff19c423272c31c093a_33af58af6776fca1f72685bf60c347d3 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:31,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742279_1455 (size=12609) 2024-11-20T13:25:31,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742280_1456 (size=4469) 2024-11-20T13:25:31,797 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:31,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T13:25:31,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:31,799 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T13:25:31,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:31,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:31,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:31,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:31,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:31,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:31,800 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#A#compaction#378 average throughput is 0.61 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:31,801 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/98cee556009b4ca6a6fba36c0a475b7e is 175, key is test_row_0/A:col10/1732109130800/Put/seqid=0 2024-11-20T13:25:31,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742281_1457 (size=31563) 2024-11-20T13:25:31,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209d6ab6edc4ff4b3ab22a0595cbd53f71_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109130844/Put/seqid=0 2024-11-20T13:25:31,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742282_1458 (size=12454) 2024-11-20T13:25:31,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T13:25:31,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:31,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
as already flushing 2024-11-20T13:25:31,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:31,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109191990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:32,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:32,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109191991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:32,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:32,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109191993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:32,006 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:32,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109191998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:32,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:32,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109192100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:32,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:32,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109192101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:32,108 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:32,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109192101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:32,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:32,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109192108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:32,205 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/e9d747ab617543bfbb7aa2b8e8a0a791 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/e9d747ab617543bfbb7aa2b8e8a0a791 2024-11-20T13:25:32,212 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/B of 33af58af6776fca1f72685bf60c347d3 into e9d747ab617543bfbb7aa2b8e8a0a791(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:25:32,212 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:32,212 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/B, priority=13, startTime=1732109131748; duration=0sec 2024-11-20T13:25:32,212 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:32,212 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:B 2024-11-20T13:25:32,212 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:32,215 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37109 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:32,215 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/C is initiating minor compaction (all files) 2024-11-20T13:25:32,215 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/C in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:32,215 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/2ca72a560cfd43c1a54c001ad6ac28fe, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/20d78a2e0a6a4e098767b0bab2071d40, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/e3d319d6b15d422d93f68d3d365d1f33] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=36.2 K 2024-11-20T13:25:32,216 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ca72a560cfd43c1a54c001ad6ac28fe, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732109129141 2024-11-20T13:25:32,216 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 20d78a2e0a6a4e098767b0bab2071d40, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732109129797 2024-11-20T13:25:32,217 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting e3d319d6b15d422d93f68d3d365d1f33, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732109130453 2024-11-20T13:25:32,231 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/98cee556009b4ca6a6fba36c0a475b7e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/98cee556009b4ca6a6fba36c0a475b7e 2024-11-20T13:25:32,237 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/A of 33af58af6776fca1f72685bf60c347d3 into 98cee556009b4ca6a6fba36c0a475b7e(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:25:32,237 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:32,237 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/A, priority=13, startTime=1732109131739; duration=0sec 2024-11-20T13:25:32,237 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:32,237 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:A 2024-11-20T13:25:32,237 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#C#compaction#381 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:32,239 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/db53b557d9264e2da4c16d37169be7b2 is 50, key is test_row_0/C:col10/1732109130800/Put/seqid=0 2024-11-20T13:25:32,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742283_1459 (size=12609) 2024-11-20T13:25:32,256 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/db53b557d9264e2da4c16d37169be7b2 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/db53b557d9264e2da4c16d37169be7b2 2024-11-20T13:25:32,260 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/C of 33af58af6776fca1f72685bf60c347d3 into db53b557d9264e2da4c16d37169be7b2(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:25:32,260 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:32,260 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/C, priority=13, startTime=1732109131757; duration=0sec 2024-11-20T13:25:32,260 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:32,260 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:C 2024-11-20T13:25:32,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:32,265 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209d6ab6edc4ff4b3ab22a0595cbd53f71_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209d6ab6edc4ff4b3ab22a0595cbd53f71_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:32,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/bdbbb5ebccff451b8330341f1ef924e3, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:32,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/bdbbb5ebccff451b8330341f1ef924e3 is 175, key is test_row_0/A:col10/1732109130844/Put/seqid=0 2024-11-20T13:25:32,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742284_1460 (size=31255) 2024-11-20T13:25:32,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:32,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:32,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109192308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:32,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109192309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:32,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:32,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109192309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:32,318 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:32,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109192315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:32,491 DEBUG [Thread-1554 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x65a3d8e5 to 127.0.0.1:53074 2024-11-20T13:25:32,491 DEBUG [Thread-1548 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0edf02c6 to 127.0.0.1:53074 2024-11-20T13:25:32,491 DEBUG [Thread-1554 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:25:32,492 DEBUG [Thread-1548 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:25:32,494 DEBUG [Thread-1550 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f9705e1 to 127.0.0.1:53074 2024-11-20T13:25:32,494 DEBUG [Thread-1550 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:25:32,495 DEBUG [Thread-1552 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5bbfc363 to 127.0.0.1:53074 2024-11-20T13:25:32,495 DEBUG [Thread-1552 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:25:32,496 DEBUG [Thread-1546 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x670c8f03 to 127.0.0.1:53074 2024-11-20T13:25:32,496 DEBUG [Thread-1546 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:25:32,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:32,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109192614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:32,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:32,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109192616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:32,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:32,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109192616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:32,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:32,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109192620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:32,693 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=434, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/bdbbb5ebccff451b8330341f1ef924e3 2024-11-20T13:25:32,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/3a82a5b4f6c94a89b896ae3e35977407 is 50, key is test_row_0/B:col10/1732109130844/Put/seqid=0 2024-11-20T13:25:32,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742285_1461 (size=12301) 2024-11-20T13:25:32,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T13:25:33,102 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/3a82a5b4f6c94a89b896ae3e35977407 2024-11-20T13:25:33,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/23cbb6b2a6bd4b7c996d64862fee02be is 50, key is test_row_0/C:col10/1732109130844/Put/seqid=0 2024-11-20T13:25:33,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742286_1462 (size=12301) 2024-11-20T13:25:33,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:33,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52988 deadline: 1732109193118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:33,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:33,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53028 deadline: 1732109193121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:33,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:33,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53036 deadline: 1732109193121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:33,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:33,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52992 deadline: 1732109193125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:33,513 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/23cbb6b2a6bd4b7c996d64862fee02be 2024-11-20T13:25:33,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/bdbbb5ebccff451b8330341f1ef924e3 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/bdbbb5ebccff451b8330341f1ef924e3 2024-11-20T13:25:33,521 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/bdbbb5ebccff451b8330341f1ef924e3, entries=150, sequenceid=434, filesize=30.5 K 2024-11-20T13:25:33,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/3a82a5b4f6c94a89b896ae3e35977407 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/3a82a5b4f6c94a89b896ae3e35977407 2024-11-20T13:25:33,525 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/3a82a5b4f6c94a89b896ae3e35977407, entries=150, sequenceid=434, filesize=12.0 K 2024-11-20T13:25:33,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/23cbb6b2a6bd4b7c996d64862fee02be as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/23cbb6b2a6bd4b7c996d64862fee02be 2024-11-20T13:25:33,529 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/23cbb6b2a6bd4b7c996d64862fee02be, entries=150, sequenceid=434, filesize=12.0 K 2024-11-20T13:25:33,530 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 33af58af6776fca1f72685bf60c347d3 in 1732ms, sequenceid=434, compaction requested=false 2024-11-20T13:25:33,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:33,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:33,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-11-20T13:25:33,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-11-20T13:25:33,532 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-20T13:25:33,532 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6580 sec 2024-11-20T13:25:33,533 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 2.6610 sec 2024-11-20T13:25:34,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:34,127 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T13:25:34,127 DEBUG [Thread-1543 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x23fd6c87 to 127.0.0.1:53074 2024-11-20T13:25:34,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:34,127 DEBUG [Thread-1543 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:25:34,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:34,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:34,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:34,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:34,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:34,129 DEBUG [Thread-1537 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x659f4c7c to 127.0.0.1:53074 2024-11-20T13:25:34,129 DEBUG [Thread-1537 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:25:34,132 DEBUG [Thread-1541 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3ce70a91 to 127.0.0.1:53074 2024-11-20T13:25:34,132 DEBUG [Thread-1541 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:25:34,134 DEBUG [Thread-1535 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x50206885 to 127.0.0.1:53074 2024-11-20T13:25:34,134 DEBUG [Thread-1535 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:25:34,135 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209430101215824a6fbf92020527a75781_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109134125/Put/seqid=0 2024-11-20T13:25:34,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:34799 is added to blk_1073742287_1463 (size=9914) 2024-11-20T13:25:34,561 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:34,565 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209430101215824a6fbf92020527a75781_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209430101215824a6fbf92020527a75781_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:34,566 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/457451490daf41ff829bc758968f9cd6, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:34,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/457451490daf41ff829bc758968f9cd6 is 175, key is test_row_0/A:col10/1732109134125/Put/seqid=0 2024-11-20T13:25:34,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742288_1464 (size=22561) 2024-11-20T13:25:34,971 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=454, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/457451490daf41ff829bc758968f9cd6 2024-11-20T13:25:34,978 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/da8376ee4a7b4739a774a7db11687ea6 is 50, key is test_row_0/B:col10/1732109134125/Put/seqid=0 2024-11-20T13:25:34,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T13:25:34,979 INFO [Thread-1545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-20T13:25:34,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742289_1465 (size=9857) 2024-11-20T13:25:35,383 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/da8376ee4a7b4739a774a7db11687ea6 2024-11-20T13:25:35,391 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/7c2a5f86594e451c85a5a97f7db58e0e is 50, key is test_row_0/C:col10/1732109134125/Put/seqid=0 2024-11-20T13:25:35,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742290_1466 (size=9857) 2024-11-20T13:25:35,797 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/7c2a5f86594e451c85a5a97f7db58e0e 2024-11-20T13:25:35,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/457451490daf41ff829bc758968f9cd6 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/457451490daf41ff829bc758968f9cd6 2024-11-20T13:25:35,806 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/457451490daf41ff829bc758968f9cd6, entries=100, sequenceid=454, filesize=22.0 K 2024-11-20T13:25:35,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/da8376ee4a7b4739a774a7db11687ea6 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/da8376ee4a7b4739a774a7db11687ea6 2024-11-20T13:25:35,810 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/da8376ee4a7b4739a774a7db11687ea6, entries=100, sequenceid=454, filesize=9.6 K 2024-11-20T13:25:35,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/7c2a5f86594e451c85a5a97f7db58e0e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/7c2a5f86594e451c85a5a97f7db58e0e 2024-11-20T13:25:35,816 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/7c2a5f86594e451c85a5a97f7db58e0e, entries=100, sequenceid=454, filesize=9.6 K 2024-11-20T13:25:35,817 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=20.13 KB/20610 for 33af58af6776fca1f72685bf60c347d3 in 1690ms, sequenceid=454, compaction requested=true 2024-11-20T13:25:35,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush 
status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:35,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:25:35,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:35,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:25:35,817 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:35,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:35,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33af58af6776fca1f72685bf60c347d3:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:25:35,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:35,817 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:35,821 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34767 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:35,821 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/B is initiating minor compaction (all files) 2024-11-20T13:25:35,821 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/B in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:35,822 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/e9d747ab617543bfbb7aa2b8e8a0a791, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/3a82a5b4f6c94a89b896ae3e35977407, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/da8376ee4a7b4739a774a7db11687ea6] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=34.0 K 2024-11-20T13:25:35,822 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85379 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:35,822 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/A is initiating minor compaction (all files) 2024-11-20T13:25:35,822 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/A in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:35,822 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/98cee556009b4ca6a6fba36c0a475b7e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/bdbbb5ebccff451b8330341f1ef924e3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/457451490daf41ff829bc758968f9cd6] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=83.4 K 2024-11-20T13:25:35,822 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:35,822 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/98cee556009b4ca6a6fba36c0a475b7e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/bdbbb5ebccff451b8330341f1ef924e3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/457451490daf41ff829bc758968f9cd6] 2024-11-20T13:25:35,823 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting e9d747ab617543bfbb7aa2b8e8a0a791, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732109130453 2024-11-20T13:25:35,823 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98cee556009b4ca6a6fba36c0a475b7e, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732109130453 2024-11-20T13:25:35,823 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a82a5b4f6c94a89b896ae3e35977407, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1732109130826 2024-11-20T13:25:35,823 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting bdbbb5ebccff451b8330341f1ef924e3, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1732109130826 2024-11-20T13:25:35,824 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting da8376ee4a7b4739a774a7db11687ea6, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1732109131986 2024-11-20T13:25:35,824 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 457451490daf41ff829bc758968f9cd6, keycount=100, bloomtype=ROW, size=22.0 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1732109131986 2024-11-20T13:25:35,831 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:35,832 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#B#compaction#387 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:35,833 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/ee8b4ccb56ea4958971c8fb2bb10b310 is 50, key is test_row_0/B:col10/1732109134125/Put/seqid=0 2024-11-20T13:25:35,833 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120ab425111859e4c8680bb3158f4fcd392_33af58af6776fca1f72685bf60c347d3 store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:35,838 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120ab425111859e4c8680bb3158f4fcd392_33af58af6776fca1f72685bf60c347d3, store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:35,838 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ab425111859e4c8680bb3158f4fcd392_33af58af6776fca1f72685bf60c347d3 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:35,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742292_1468 (size=4469) 2024-11-20T13:25:35,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742291_1467 (size=12711) 2024-11-20T13:25:36,258 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#A#compaction#388 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:36,259 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/8b166bd076b84f398c03e615b371e310 is 175, key is test_row_0/A:col10/1732109134125/Put/seqid=0 2024-11-20T13:25:36,263 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/ee8b4ccb56ea4958971c8fb2bb10b310 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/ee8b4ccb56ea4958971c8fb2bb10b310 2024-11-20T13:25:36,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742293_1469 (size=31772) 2024-11-20T13:25:36,267 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/B of 33af58af6776fca1f72685bf60c347d3 into ee8b4ccb56ea4958971c8fb2bb10b310(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:25:36,267 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:36,267 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/B, priority=13, startTime=1732109135817; duration=0sec 2024-11-20T13:25:36,268 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:36,268 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:B 2024-11-20T13:25:36,268 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:36,269 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34767 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:36,269 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 33af58af6776fca1f72685bf60c347d3/C is initiating minor compaction (all files) 2024-11-20T13:25:36,269 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33af58af6776fca1f72685bf60c347d3/C in TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:36,269 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/db53b557d9264e2da4c16d37169be7b2, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/23cbb6b2a6bd4b7c996d64862fee02be, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/7c2a5f86594e451c85a5a97f7db58e0e] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp, totalSize=34.0 K 2024-11-20T13:25:36,269 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting db53b557d9264e2da4c16d37169be7b2, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732109130453 2024-11-20T13:25:36,270 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 23cbb6b2a6bd4b7c996d64862fee02be, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1732109130826 2024-11-20T13:25:36,270 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c2a5f86594e451c85a5a97f7db58e0e, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1732109131986 2024-11-20T13:25:36,283 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33af58af6776fca1f72685bf60c347d3#C#compaction#389 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:36,283 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/244e0cd7b1c34f139c58b106f742d843 is 50, key is test_row_0/C:col10/1732109134125/Put/seqid=0 2024-11-20T13:25:36,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742294_1470 (size=12711) 2024-11-20T13:25:36,298 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/244e0cd7b1c34f139c58b106f742d843 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/244e0cd7b1c34f139c58b106f742d843 2024-11-20T13:25:36,302 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/C of 33af58af6776fca1f72685bf60c347d3 into 244e0cd7b1c34f139c58b106f742d843(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:25:36,302 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:36,302 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/C, priority=13, startTime=1732109135817; duration=0sec 2024-11-20T13:25:36,303 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:36,303 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:C 2024-11-20T13:25:36,668 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/8b166bd076b84f398c03e615b371e310 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/8b166bd076b84f398c03e615b371e310 2024-11-20T13:25:36,672 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33af58af6776fca1f72685bf60c347d3/A of 33af58af6776fca1f72685bf60c347d3 into 8b166bd076b84f398c03e615b371e310(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:25:36,672 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:36,672 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3., storeName=33af58af6776fca1f72685bf60c347d3/A, priority=13, startTime=1732109135817; duration=0sec 2024-11-20T13:25:36,672 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:36,672 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33af58af6776fca1f72685bf60c347d3:A 2024-11-20T13:25:40,983 DEBUG [Thread-1539 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d2f5cd9 to 127.0.0.1:53074 2024-11-20T13:25:40,983 DEBUG [Thread-1539 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:25:40,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T13:25:40,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 84 2024-11-20T13:25:40,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 99 2024-11-20T13:25:40,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 1 2024-11-20T13:25:40,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 88 2024-11-20T13:25:40,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 86 2024-11-20T13:25:40,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T13:25:40,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T13:25:40,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1554 2024-11-20T13:25:40,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4662 rows 2024-11-20T13:25:40,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1542 2024-11-20T13:25:40,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4626 rows 2024-11-20T13:25:40,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1530 2024-11-20T13:25:40,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4590 rows 2024-11-20T13:25:40,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1544 2024-11-20T13:25:40,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4632 rows 2024-11-20T13:25:40,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1566 2024-11-20T13:25:40,984 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4698 rows 2024-11-20T13:25:40,984 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T13:25:40,984 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6a3c6746 to 127.0.0.1:53074 2024-11-20T13:25:40,984 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:25:40,989 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T13:25:40,989 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T13:25:40,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T13:25:40,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T13:25:40,994 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109140994"}]},"ts":"1732109140994"} 2024-11-20T13:25:40,995 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T13:25:40,998 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T13:25:40,998 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T13:25:41,000 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=33af58af6776fca1f72685bf60c347d3, UNASSIGN}] 2024-11-20T13:25:41,001 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=33af58af6776fca1f72685bf60c347d3, UNASSIGN 2024-11-20T13:25:41,002 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=33af58af6776fca1f72685bf60c347d3, regionState=CLOSING, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:41,003 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T13:25:41,003 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; CloseRegionProcedure 33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137}] 2024-11-20T13:25:41,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T13:25:41,154 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:41,154 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(124): Close 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:41,154 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T13:25:41,154 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1681): Closing 33af58af6776fca1f72685bf60c347d3, disabling compactions & flushes 2024-11-20T13:25:41,154 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:41,154 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:41,155 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. after waiting 0 ms 2024-11-20T13:25:41,155 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 
2024-11-20T13:25:41,155 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(2837): Flushing 33af58af6776fca1f72685bf60c347d3 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T13:25:41,155 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=A 2024-11-20T13:25:41,155 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:41,155 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=B 2024-11-20T13:25:41,155 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:41,155 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33af58af6776fca1f72685bf60c347d3, store=C 2024-11-20T13:25:41,155 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:41,160 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112088a752d9096f44009a159cd5b55f8b85_33af58af6776fca1f72685bf60c347d3 is 50, key is test_row_0/A:col10/1732109140982/Put/seqid=0 2024-11-20T13:25:41,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742295_1471 (size=12454) 2024-11-20T13:25:41,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T13:25:41,564 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:41,568 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112088a752d9096f44009a159cd5b55f8b85_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112088a752d9096f44009a159cd5b55f8b85_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:41,569 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/542d7ec5098f497a8a67d67111532ca5, store: [table=TestAcidGuarantees family=A region=33af58af6776fca1f72685bf60c347d3] 2024-11-20T13:25:41,569 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/542d7ec5098f497a8a67d67111532ca5 is 175, key is test_row_0/A:col10/1732109140982/Put/seqid=0 2024-11-20T13:25:41,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742296_1472 (size=31255) 2024-11-20T13:25:41,580 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=464, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/542d7ec5098f497a8a67d67111532ca5 2024-11-20T13:25:41,588 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/5f4d6e39d9ef400591391724fc633329 is 50, key is test_row_0/B:col10/1732109140982/Put/seqid=0 2024-11-20T13:25:41,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742297_1473 (size=12301) 2024-11-20T13:25:41,594 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=464 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/5f4d6e39d9ef400591391724fc633329 2024-11-20T13:25:41,602 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/17bf4ca70a044c59badaf122829906af is 50, key is test_row_0/C:col10/1732109140982/Put/seqid=0 2024-11-20T13:25:41,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T13:25:41,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742298_1474 (size=12301) 2024-11-20T13:25:42,014 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=464 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/17bf4ca70a044c59badaf122829906af 2024-11-20T13:25:42,018 DEBUG 
[RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/A/542d7ec5098f497a8a67d67111532ca5 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/542d7ec5098f497a8a67d67111532ca5 2024-11-20T13:25:42,022 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/542d7ec5098f497a8a67d67111532ca5, entries=150, sequenceid=464, filesize=30.5 K 2024-11-20T13:25:42,022 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/B/5f4d6e39d9ef400591391724fc633329 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/5f4d6e39d9ef400591391724fc633329 2024-11-20T13:25:42,025 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/5f4d6e39d9ef400591391724fc633329, entries=150, sequenceid=464, filesize=12.0 K 2024-11-20T13:25:42,026 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/.tmp/C/17bf4ca70a044c59badaf122829906af as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/17bf4ca70a044c59badaf122829906af 2024-11-20T13:25:42,029 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/17bf4ca70a044c59badaf122829906af, entries=150, sequenceid=464, filesize=12.0 K 2024-11-20T13:25:42,030 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 33af58af6776fca1f72685bf60c347d3 in 875ms, sequenceid=464, compaction requested=false 2024-11-20T13:25:42,030 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/acb035c9513142508779c0032fe51465, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/883c305c3d864c46a7fd6e6006bc04c6, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/9948c70148474d38a20746fc53bb4bce, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/69ca557bcce145d4b4acd2100f655f8b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/8901750f1ad840188ac091fe2fa23822, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/45aea7714c204f2cb4b4d3a990af7bda, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/d201e4b011634afa9ec2b5d3af94a539, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/cb13b3be4e5749a38f9f19ba1d8f891e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/98cee556009b4ca6a6fba36c0a475b7e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/13e2ff12d0ac4c4aacf717f45e2b1fa5, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/bdbbb5ebccff451b8330341f1ef924e3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/457451490daf41ff829bc758968f9cd6] to archive 2024-11-20T13:25:42,031 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T13:25:42,032 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/acb035c9513142508779c0032fe51465 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/acb035c9513142508779c0032fe51465 2024-11-20T13:25:42,033 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/883c305c3d864c46a7fd6e6006bc04c6 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/883c305c3d864c46a7fd6e6006bc04c6 2024-11-20T13:25:42,034 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/9948c70148474d38a20746fc53bb4bce to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/9948c70148474d38a20746fc53bb4bce 2024-11-20T13:25:42,035 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/69ca557bcce145d4b4acd2100f655f8b to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/69ca557bcce145d4b4acd2100f655f8b 2024-11-20T13:25:42,036 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/8901750f1ad840188ac091fe2fa23822 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/8901750f1ad840188ac091fe2fa23822 2024-11-20T13:25:42,037 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/45aea7714c204f2cb4b4d3a990af7bda to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/45aea7714c204f2cb4b4d3a990af7bda 2024-11-20T13:25:42,037 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/d201e4b011634afa9ec2b5d3af94a539 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/d201e4b011634afa9ec2b5d3af94a539 2024-11-20T13:25:42,038 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/cb13b3be4e5749a38f9f19ba1d8f891e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/cb13b3be4e5749a38f9f19ba1d8f891e 2024-11-20T13:25:42,039 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/98cee556009b4ca6a6fba36c0a475b7e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/98cee556009b4ca6a6fba36c0a475b7e 2024-11-20T13:25:42,040 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/13e2ff12d0ac4c4aacf717f45e2b1fa5 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/13e2ff12d0ac4c4aacf717f45e2b1fa5 2024-11-20T13:25:42,041 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/bdbbb5ebccff451b8330341f1ef924e3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/bdbbb5ebccff451b8330341f1ef924e3 2024-11-20T13:25:42,042 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/457451490daf41ff829bc758968f9cd6 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/457451490daf41ff829bc758968f9cd6 2024-11-20T13:25:42,043 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/e102e03d55964e10af2610769af39adc, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/bc4b7679351b4a8bbae31af158e8f1da, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/7ab4245036f74b7e8c04390ffe0ad4ae, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/2e3a90f6b49d49eb851a78e8cc7221c8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/264d11662b9d4555ad9c953277983978, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/6931f809d4054881b60ce92860c6316f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/6a77edf11ee243309695096e262f33a2, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/70257b3e6bdb4533aa418973a65724b9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/e9d747ab617543bfbb7aa2b8e8a0a791, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/72daafba53724067abe51704ff5a6f3e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/3a82a5b4f6c94a89b896ae3e35977407, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/da8376ee4a7b4739a774a7db11687ea6] to archive 2024-11-20T13:25:42,044 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T13:25:42,045 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/e102e03d55964e10af2610769af39adc to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/e102e03d55964e10af2610769af39adc 2024-11-20T13:25:42,046 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/bc4b7679351b4a8bbae31af158e8f1da to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/bc4b7679351b4a8bbae31af158e8f1da 2024-11-20T13:25:42,047 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/7ab4245036f74b7e8c04390ffe0ad4ae to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/7ab4245036f74b7e8c04390ffe0ad4ae 2024-11-20T13:25:42,048 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/2e3a90f6b49d49eb851a78e8cc7221c8 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/2e3a90f6b49d49eb851a78e8cc7221c8 2024-11-20T13:25:42,049 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/264d11662b9d4555ad9c953277983978 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/264d11662b9d4555ad9c953277983978 2024-11-20T13:25:42,050 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/6931f809d4054881b60ce92860c6316f to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/6931f809d4054881b60ce92860c6316f 2024-11-20T13:25:42,051 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/6a77edf11ee243309695096e262f33a2 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/6a77edf11ee243309695096e262f33a2 2024-11-20T13:25:42,052 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/70257b3e6bdb4533aa418973a65724b9 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/70257b3e6bdb4533aa418973a65724b9 2024-11-20T13:25:42,053 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/e9d747ab617543bfbb7aa2b8e8a0a791 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/e9d747ab617543bfbb7aa2b8e8a0a791 2024-11-20T13:25:42,054 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/72daafba53724067abe51704ff5a6f3e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/72daafba53724067abe51704ff5a6f3e 2024-11-20T13:25:42,055 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/3a82a5b4f6c94a89b896ae3e35977407 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/3a82a5b4f6c94a89b896ae3e35977407 2024-11-20T13:25:42,056 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/da8376ee4a7b4739a774a7db11687ea6 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/da8376ee4a7b4739a774a7db11687ea6 2024-11-20T13:25:42,057 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/4633e84a37ce4dc7bf05d99f128d0b4e, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/0828bc02fee0497984abf68a0de6335f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/c359124a5ca44fdcbe375a9fc887bd86, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/91c3d3564def469d8aeacef9e9434905, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/62362fae03af4a579e077ab8e080ee39, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/2ca72a560cfd43c1a54c001ad6ac28fe, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/24887886b67947acbc98f81fc0a801dd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/20d78a2e0a6a4e098767b0bab2071d40, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/db53b557d9264e2da4c16d37169be7b2, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/e3d319d6b15d422d93f68d3d365d1f33, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/23cbb6b2a6bd4b7c996d64862fee02be, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/7c2a5f86594e451c85a5a97f7db58e0e] to archive 2024-11-20T13:25:42,058 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T13:25:42,059 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/4633e84a37ce4dc7bf05d99f128d0b4e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/4633e84a37ce4dc7bf05d99f128d0b4e 2024-11-20T13:25:42,060 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/0828bc02fee0497984abf68a0de6335f to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/0828bc02fee0497984abf68a0de6335f 2024-11-20T13:25:42,061 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/c359124a5ca44fdcbe375a9fc887bd86 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/c359124a5ca44fdcbe375a9fc887bd86 2024-11-20T13:25:42,062 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/91c3d3564def469d8aeacef9e9434905 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/91c3d3564def469d8aeacef9e9434905 2024-11-20T13:25:42,063 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/62362fae03af4a579e077ab8e080ee39 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/62362fae03af4a579e077ab8e080ee39 2024-11-20T13:25:42,064 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/2ca72a560cfd43c1a54c001ad6ac28fe to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/2ca72a560cfd43c1a54c001ad6ac28fe 2024-11-20T13:25:42,065 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/24887886b67947acbc98f81fc0a801dd to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/24887886b67947acbc98f81fc0a801dd 2024-11-20T13:25:42,066 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/20d78a2e0a6a4e098767b0bab2071d40 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/20d78a2e0a6a4e098767b0bab2071d40 2024-11-20T13:25:42,067 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/db53b557d9264e2da4c16d37169be7b2 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/db53b557d9264e2da4c16d37169be7b2 2024-11-20T13:25:42,068 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/e3d319d6b15d422d93f68d3d365d1f33 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/e3d319d6b15d422d93f68d3d365d1f33 2024-11-20T13:25:42,068 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/23cbb6b2a6bd4b7c996d64862fee02be to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/23cbb6b2a6bd4b7c996d64862fee02be 2024-11-20T13:25:42,069 DEBUG [StoreCloser-TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/7c2a5f86594e451c85a5a97f7db58e0e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/7c2a5f86594e451c85a5a97f7db58e0e 2024-11-20T13:25:42,092 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/recovered.edits/467.seqid, newMaxSeqId=467, maxSeqId=4 2024-11-20T13:25:42,092 INFO 
[RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3. 2024-11-20T13:25:42,092 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1635): Region close journal for 33af58af6776fca1f72685bf60c347d3: 2024-11-20T13:25:42,094 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(170): Closed 33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:42,094 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=33af58af6776fca1f72685bf60c347d3, regionState=CLOSED 2024-11-20T13:25:42,096 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-20T13:25:42,096 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseRegionProcedure 33af58af6776fca1f72685bf60c347d3, server=5ef453f0fbb6,46739,1732109006137 in 1.0920 sec 2024-11-20T13:25:42,098 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=124, resume processing ppid=123 2024-11-20T13:25:42,098 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, ppid=123, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=33af58af6776fca1f72685bf60c347d3, UNASSIGN in 1.0960 sec 2024-11-20T13:25:42,099 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-20T13:25:42,099 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.1000 sec 2024-11-20T13:25:42,100 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109142100"}]},"ts":"1732109142100"} 2024-11-20T13:25:42,101 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T13:25:42,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T13:25:42,135 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T13:25:42,138 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.1460 sec 2024-11-20T13:25:43,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T13:25:43,104 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-11-20T13:25:43,105 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T13:25:43,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:25:43,107 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=126, 
state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:25:43,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T13:25:43,108 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=126, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:25:43,110 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,115 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A, FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B, FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C, FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/recovered.edits] 2024-11-20T13:25:43,119 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/542d7ec5098f497a8a67d67111532ca5 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/542d7ec5098f497a8a67d67111532ca5 2024-11-20T13:25:43,120 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/8b166bd076b84f398c03e615b371e310 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/A/8b166bd076b84f398c03e615b371e310 2024-11-20T13:25:43,123 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/5f4d6e39d9ef400591391724fc633329 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/5f4d6e39d9ef400591391724fc633329 2024-11-20T13:25:43,124 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/ee8b4ccb56ea4958971c8fb2bb10b310 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/B/ee8b4ccb56ea4958971c8fb2bb10b310 2024-11-20T13:25:43,134 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/17bf4ca70a044c59badaf122829906af to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/17bf4ca70a044c59badaf122829906af 2024-11-20T13:25:43,135 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/244e0cd7b1c34f139c58b106f742d843 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/C/244e0cd7b1c34f139c58b106f742d843 2024-11-20T13:25:43,140 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/recovered.edits/467.seqid to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3/recovered.edits/467.seqid 2024-11-20T13:25:43,141 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,142 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T13:25:43,142 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T13:25:43,143 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T13:25:43,147 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120137baece001049858208bcb70a66de92_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120137baece001049858208bcb70a66de92_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,148 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112014ed1f56ce224f139c03b62e88ed5b08_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112014ed1f56ce224f139c03b62e88ed5b08_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,150 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120165b235700594ebebbe768e70d498c02_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120165b235700594ebebbe768e70d498c02_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,151 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120297fb4da84e1462ebe054fa4c91b9982_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120297fb4da84e1462ebe054fa4c91b9982_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,152 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203d474c83899248f587f924baaa7528b3_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203d474c83899248f587f924baaa7528b3_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,154 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112044d774642d34443e8279edad4b5f0ca2_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112044d774642d34443e8279edad4b5f0ca2_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,155 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120462140a3cb4c4469abd0ab77256d5ad0_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120462140a3cb4c4469abd0ab77256d5ad0_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,156 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204c81d0448e274871bf2a0a20e30c3ed3_33af58af6776fca1f72685bf60c347d3 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204c81d0448e274871bf2a0a20e30c3ed3_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,157 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204ed6d1697fd2419192f38d75a2f9db5d_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204ed6d1697fd2419192f38d75a2f9db5d_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,159 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120594eba400df148cb950f74393aecfc66_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120594eba400df148cb950f74393aecfc66_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,160 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205e6eec3832dd4a7b9a5725d0a9fe9d8b_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205e6eec3832dd4a7b9a5725d0a9fe9d8b_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,161 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205e8673d611c1460c84ae140dd827cfe5_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205e8673d611c1460c84ae140dd827cfe5_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,162 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112062d8df810b6645d8b26d07feaf293d21_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112062d8df810b6645d8b26d07feaf293d21_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,164 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206c7d2f1ba5414fac9c8476580b170d28_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206c7d2f1ba5414fac9c8476580b170d28_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,165 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112088a752d9096f44009a159cd5b55f8b85_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112088a752d9096f44009a159cd5b55f8b85_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,166 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208fbbda440d244bb5b06ba739337f1b5a_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208fbbda440d244bb5b06ba739337f1b5a_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,167 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209430101215824a6fbf92020527a75781_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209430101215824a6fbf92020527a75781_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,168 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209abe4e14c29a4108ae256abfe51fe278_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209abe4e14c29a4108ae256abfe51fe278_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,170 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209d6ab6edc4ff4b3ab22a0595cbd53f71_33af58af6776fca1f72685bf60c347d3 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209d6ab6edc4ff4b3ab22a0595cbd53f71_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,171 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dc5be2fdb9a24ffd8b4a9fc64910522e_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dc5be2fdb9a24ffd8b4a9fc64910522e_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,172 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e252e30b67d345119224d69e2121a06c_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e252e30b67d345119224d69e2121a06c_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,173 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f171bf715c914fb0a7fd73b2f8435188_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f171bf715c914fb0a7fd73b2f8435188_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,175 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f30bfe5fd57549028b329c007fe74540_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f30bfe5fd57549028b329c007fe74540_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,176 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f8b79872a3414ef0ada3e42706ab2077_33af58af6776fca1f72685bf60c347d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f8b79872a3414ef0ada3e42706ab2077_33af58af6776fca1f72685bf60c347d3 2024-11-20T13:25:43,176 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T13:25:43,179 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=126, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:25:43,181 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T13:25:43,183 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T13:25:43,184 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=126, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:25:43,184 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T13:25:43,184 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732109143184"}]},"ts":"9223372036854775807"} 2024-11-20T13:25:43,187 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T13:25:43,187 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 33af58af6776fca1f72685bf60c347d3, NAME => 'TestAcidGuarantees,,1732109108590.33af58af6776fca1f72685bf60c347d3.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T13:25:43,187 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T13:25:43,187 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732109143187"}]},"ts":"9223372036854775807"} 2024-11-20T13:25:43,188 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T13:25:43,191 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=126, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:25:43,192 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 86 msec 2024-11-20T13:25:43,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T13:25:43,208 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-11-20T13:25:43,228 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=238 (was 237) - Thread LEAK? -, OpenFileDescriptor=451 (was 446) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=944 (was 1193), ProcessCount=11 (was 11), AvailableMemoryMB=250 (was 1148) 2024-11-20T13:25:43,242 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=238, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=944, ProcessCount=11, AvailableMemoryMB=248 2024-11-20T13:25:43,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T13:25:43,244 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T13:25:43,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=127, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T13:25:43,246 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T13:25:43,246 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:43,246 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 127 2024-11-20T13:25:43,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-20T13:25:43,247 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T13:25:43,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742299_1475 (size=963) 2024-11-20T13:25:43,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 
2024-11-20T13:25:43,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-20T13:25:43,656 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc 2024-11-20T13:25:43,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742300_1476 (size=53) 2024-11-20T13:25:43,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-20T13:25:44,062 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:25:44,062 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 24db988c4fa8e1a0b1451e8c68b68697, disabling compactions & flushes 2024-11-20T13:25:44,062 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:44,062 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:44,062 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. after waiting 0 ms 2024-11-20T13:25:44,062 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:44,062 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
2024-11-20T13:25:44,062 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:44,063 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T13:25:44,063 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732109144063"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732109144063"}]},"ts":"1732109144063"} 2024-11-20T13:25:44,064 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T13:25:44,065 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T13:25:44,065 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109144065"}]},"ts":"1732109144065"} 2024-11-20T13:25:44,066 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T13:25:44,070 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=24db988c4fa8e1a0b1451e8c68b68697, ASSIGN}] 2024-11-20T13:25:44,071 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=24db988c4fa8e1a0b1451e8c68b68697, ASSIGN 2024-11-20T13:25:44,071 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=24db988c4fa8e1a0b1451e8c68b68697, ASSIGN; state=OFFLINE, location=5ef453f0fbb6,46739,1732109006137; forceNewPlan=false, retain=false 2024-11-20T13:25:44,222 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=24db988c4fa8e1a0b1451e8c68b68697, regionState=OPENING, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:44,223 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; OpenRegionProcedure 24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137}] 2024-11-20T13:25:44,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-20T13:25:44,375 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:44,377 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
2024-11-20T13:25:44,378 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7285): Opening region: {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} 2024-11-20T13:25:44,378 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:44,378 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:25:44,378 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7327): checking encryption for 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:44,378 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7330): checking classloading for 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:44,379 INFO [StoreOpener-24db988c4fa8e1a0b1451e8c68b68697-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:44,380 INFO [StoreOpener-24db988c4fa8e1a0b1451e8c68b68697-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:25:44,381 INFO [StoreOpener-24db988c4fa8e1a0b1451e8c68b68697-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 24db988c4fa8e1a0b1451e8c68b68697 columnFamilyName A 2024-11-20T13:25:44,381 DEBUG [StoreOpener-24db988c4fa8e1a0b1451e8c68b68697-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:44,381 INFO [StoreOpener-24db988c4fa8e1a0b1451e8c68b68697-1 {}] regionserver.HStore(327): Store=24db988c4fa8e1a0b1451e8c68b68697/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:25:44,381 INFO [StoreOpener-24db988c4fa8e1a0b1451e8c68b68697-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:44,382 INFO [StoreOpener-24db988c4fa8e1a0b1451e8c68b68697-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:25:44,382 INFO [StoreOpener-24db988c4fa8e1a0b1451e8c68b68697-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 24db988c4fa8e1a0b1451e8c68b68697 columnFamilyName B 2024-11-20T13:25:44,382 DEBUG [StoreOpener-24db988c4fa8e1a0b1451e8c68b68697-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:44,382 INFO [StoreOpener-24db988c4fa8e1a0b1451e8c68b68697-1 {}] regionserver.HStore(327): Store=24db988c4fa8e1a0b1451e8c68b68697/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:25:44,383 INFO [StoreOpener-24db988c4fa8e1a0b1451e8c68b68697-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:44,384 INFO [StoreOpener-24db988c4fa8e1a0b1451e8c68b68697-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:25:44,384 INFO [StoreOpener-24db988c4fa8e1a0b1451e8c68b68697-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 24db988c4fa8e1a0b1451e8c68b68697 columnFamilyName C 2024-11-20T13:25:44,384 DEBUG [StoreOpener-24db988c4fa8e1a0b1451e8c68b68697-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:25:44,384 INFO [StoreOpener-24db988c4fa8e1a0b1451e8c68b68697-1 {}] regionserver.HStore(327): Store=24db988c4fa8e1a0b1451e8c68b68697/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:25:44,384 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:44,385 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:44,385 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:44,386 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T13:25:44,388 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1085): writing seq id for 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:44,389 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T13:25:44,390 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1102): Opened 24db988c4fa8e1a0b1451e8c68b68697; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74608856, jitterRate=0.11175858974456787}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T13:25:44,390 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1001): Region open journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:44,391 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., pid=129, masterSystemTime=1732109144375 2024-11-20T13:25:44,392 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:44,393 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
2024-11-20T13:25:44,393 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=24db988c4fa8e1a0b1451e8c68b68697, regionState=OPEN, openSeqNum=2, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:44,395 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-20T13:25:44,395 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; OpenRegionProcedure 24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 in 171 msec 2024-11-20T13:25:44,397 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=128, resume processing ppid=127 2024-11-20T13:25:44,397 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, ppid=127, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=24db988c4fa8e1a0b1451e8c68b68697, ASSIGN in 325 msec 2024-11-20T13:25:44,397 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T13:25:44,398 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109144397"}]},"ts":"1732109144397"} 2024-11-20T13:25:44,398 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T13:25:44,401 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T13:25:44,402 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1570 sec 2024-11-20T13:25:45,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-20T13:25:45,354 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 127 completed 2024-11-20T13:25:45,355 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x373cbe4b to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@112932e5 2024-11-20T13:25:45,359 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2eebe889, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:45,360 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:25:45,362 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45358, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:45,363 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T13:25:45,363 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38686, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T13:25:45,365 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1baebee3 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@43481969 2024-11-20T13:25:45,367 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32961e29, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:45,368 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x01b5ec79 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2352e77 2024-11-20T13:25:45,371 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d00f4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:45,371 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51237850 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a4eb295 2024-11-20T13:25:45,373 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a6478ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:45,374 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x131834fc to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c4de610 2024-11-20T13:25:45,376 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45b75664, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:45,377 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x579a9390 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@24949215 2024-11-20T13:25:45,379 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b6ff495, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:45,380 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6263bb7a to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@468c20ff 2024-11-20T13:25:45,386 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@331eefb8, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:45,387 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12a8593f to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3504084d 2024-11-20T13:25:45,389 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71a3427a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:45,390 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x30d04cd0 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@25837ce7 2024-11-20T13:25:45,394 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ed84abf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:45,394 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10d34e60 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e097cd9 2024-11-20T13:25:45,397 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22084615, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:45,398 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x33ef6df4 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@566520e4 2024-11-20T13:25:45,402 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36bdecd3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:25:45,414 DEBUG [hconnection-0x8608415-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:25:45,414 DEBUG [hconnection-0x5413c4b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:25:45,414 DEBUG [hconnection-0x565a2b34-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:25:45,414 DEBUG [hconnection-0x6d042fb5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:25:45,414 DEBUG [hconnection-0x3c40b238-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:25:45,415 DEBUG [hconnection-0x10c84c72-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-11-20T13:25:45,415 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45370, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:45,415 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45372, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:45,415 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45360, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:45,416 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45376, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:45,416 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45386, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:45,416 DEBUG [hconnection-0x752a7e4e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:25:45,416 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45390, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:45,416 DEBUG [hconnection-0x7a373f3d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:25:45,417 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45396, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:45,417 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45406, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:45,418 DEBUG [hconnection-0x181b269b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:25:45,420 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45414, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:45,420 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:25:45,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-20T13:25:45,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T13:25:45,422 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:25:45,423 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:25:45,423 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:25:45,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:45,423 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T13:25:45,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:25:45,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:45,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:25:45,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:45,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:25:45,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:45,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:45,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109205437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:45,439 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:45,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109205438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:45,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:45,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109205439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:45,439 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:45,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109205439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:45,446 DEBUG [hconnection-0x1b9962ac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:25:45,447 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45420, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:25:45,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:45,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109205449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:45,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/7fd46ae62fdb4525b31116511aa09b3b is 50, key is test_row_0/A:col10/1732109145423/Put/seqid=0 2024-11-20T13:25:45,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742301_1477 (size=12001) 2024-11-20T13:25:45,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T13:25:45,542 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:45,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109205540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:45,542 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:45,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109205540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:45,542 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:45,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109205540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:45,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:45,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109205540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:45,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:45,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109205550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:45,575 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:45,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T13:25:45,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:45,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:45,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:45,575 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:45,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:45,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:45,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T13:25:45,728 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:45,728 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T13:25:45,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:45,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:45,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:45,729 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:45,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:45,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:45,748 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:45,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109205743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:45,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:45,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109205743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:45,748 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:45,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109205744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:45,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:45,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109205745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:45,758 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:45,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109205754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:45,877 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/7fd46ae62fdb4525b31116511aa09b3b 2024-11-20T13:25:45,881 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:45,882 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T13:25:45,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:45,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:45,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
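The repeated RegionTooBusyException entries above are the region server refusing new mutations once the region's memstore passes its blocking limit (512 KB in this run); writers are expected to back off until the in-progress flush drains the memstore. A minimal client-side sketch, assuming the standard HBase Java client and that the exception actually reaches the caller (in practice the client's own retry machinery may absorb or wrap it), could look like this:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/family/qualifier names mirror the test rows seen in the log
      // (test_row_0, families A/B/C, column col10); the value is illustrative.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Back off and retry when the region reports it is over its memstore
      // blocking limit, giving the flush time to drain the memstore.
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e; // give up after a few attempts
          }
          Thread.sleep(200L * attempt); // simple linear backoff
        }
      }
    }
  }
}
```

The blocking limit itself is derived from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the 512 KB figure suggests the test shrinks these to force frequent flushes and back-pressure.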
2024-11-20T13:25:45,882 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:45,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:45,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:45,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/9f51919d0c1e41748bb1fc2df04a0c92 is 50, key is test_row_0/B:col10/1732109145423/Put/seqid=0 2024-11-20T13:25:45,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742302_1478 (size=12001) 2024-11-20T13:25:45,917 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/9f51919d0c1e41748bb1fc2df04a0c92 2024-11-20T13:25:45,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/f48be3db5b8141e8bda297d80bed7dfa is 50, key is test_row_0/C:col10/1732109145423/Put/seqid=0 2024-11-20T13:25:45,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742303_1479 (size=12001) 2024-11-20T13:25:45,950 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/f48be3db5b8141e8bda297d80bed7dfa 2024-11-20T13:25:45,955 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/7fd46ae62fdb4525b31116511aa09b3b as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/7fd46ae62fdb4525b31116511aa09b3b 2024-11-20T13:25:45,959 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/7fd46ae62fdb4525b31116511aa09b3b, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T13:25:45,960 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/9f51919d0c1e41748bb1fc2df04a0c92 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/9f51919d0c1e41748bb1fc2df04a0c92 2024-11-20T13:25:45,965 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/9f51919d0c1e41748bb1fc2df04a0c92, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T13:25:45,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/f48be3db5b8141e8bda297d80bed7dfa as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/f48be3db5b8141e8bda297d80bed7dfa 2024-11-20T13:25:45,972 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/f48be3db5b8141e8bda297d80bed7dfa, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T13:25:45,974 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=140.89 KB/144270 for 24db988c4fa8e1a0b1451e8c68b68697 in 551ms, sequenceid=15, compaction requested=false 2024-11-20T13:25:45,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:46,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T13:25:46,035 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T13:25:46,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
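The "flush TestAcidGuarantees" request and the repeated "Checking to see if procedure is done pid=130" calls correspond to a client-side table flush: the master stores a FlushTableProcedure and the client polls until it completes. A minimal sketch of issuing the same flush through the HBase Java client (connection settings assumed to come from the test's hbase-site.xml):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; under the hood the
      // master runs a FlushTableProcedure with per-region FlushRegionProcedure
      // subprocedures, as seen in the pid=130/pid=131 entries above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```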
2024-11-20T13:25:46,036 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T13:25:46,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:25:46,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:46,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:25:46,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:46,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:25:46,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:46,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/6881109040984ec484332e67d3905073 is 50, key is test_row_0/A:col10/1732109145436/Put/seqid=0 2024-11-20T13:25:46,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:46,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:46,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742304_1480 (size=12001) 2024-11-20T13:25:46,058 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/6881109040984ec484332e67d3905073 2024-11-20T13:25:46,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/1e8592d9d2ae4187a06c517e95bee0e4 is 50, key is test_row_0/B:col10/1732109145436/Put/seqid=0 2024-11-20T13:25:46,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109206064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109206066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109206066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109206067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109206068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742305_1481 (size=12001) 2024-11-20T13:25:46,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109206173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109206173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109206173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109206173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109206173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109206378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109206379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109206379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109206380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109206380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,488 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/1e8592d9d2ae4187a06c517e95bee0e4 2024-11-20T13:25:46,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/dd6571704e494a0d877639e50c0088be is 50, key is test_row_0/C:col10/1732109145436/Put/seqid=0 2024-11-20T13:25:46,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742306_1482 (size=12001) 2024-11-20T13:25:46,501 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/dd6571704e494a0d877639e50c0088be 2024-11-20T13:25:46,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/6881109040984ec484332e67d3905073 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/6881109040984ec484332e67d3905073 2024-11-20T13:25:46,510 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/6881109040984ec484332e67d3905073, entries=150, sequenceid=39, filesize=11.7 K 2024-11-20T13:25:46,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/1e8592d9d2ae4187a06c517e95bee0e4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/1e8592d9d2ae4187a06c517e95bee0e4 2024-11-20T13:25:46,516 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/1e8592d9d2ae4187a06c517e95bee0e4, entries=150, sequenceid=39, filesize=11.7 K 2024-11-20T13:25:46,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/dd6571704e494a0d877639e50c0088be as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/dd6571704e494a0d877639e50c0088be 2024-11-20T13:25:46,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T13:25:46,529 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/dd6571704e494a0d877639e50c0088be, entries=150, sequenceid=39, filesize=11.7 K 2024-11-20T13:25:46,530 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 24db988c4fa8e1a0b1451e8c68b68697 in 494ms, sequenceid=39, compaction requested=false 2024-11-20T13:25:46,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:46,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
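The repeated RegionTooBusyException entries above show writes being rejected in HRegion.checkResources once the region's memstore exceeds its blocking limit (reported as 512.0 K) while the flush is still in progress. Below is a hedged sketch of the configuration relationship behind such a small limit and of a writer that tolerates the rejection; the property names are standard HBase settings, but the concrete values and the retry commentary are assumptions, not taken from this test's configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MemstorePressureExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Region-server-side settings (normally in hbase-site.xml), shown here only to
            // illustrate the relationship: blocking limit = flush size * block multiplier.
            // A 128 KB flush size with the default multiplier of 4 would give the 512 KB
            // limit reported above; the exact values are assumptions.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                // Row, family and qualifier mirror the keys seen in the log (test_row_0/A:col10).
                Put put = new Put(Bytes.toBytes("test_row_0"))
                        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                try {
                    table.put(put);
                } catch (RegionTooBusyException e) {
                    // The client normally retries this internally (hbase.client.retries.number,
                    // hbase.client.pause); reaching this catch means those retries were exhausted.
                    System.err.println("Region still blocked: " + e.getMessage());
                }
            }
        }
    }

That retry behaviour is consistent with the log itself: the same client connections (for example 172.17.0.2:45414) reappear with increasing callIds and later deadlines each time the region reports it is still over the memstore limit.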
2024-11-20T13:25:46,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-11-20T13:25:46,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-11-20T13:25:46,533 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-20T13:25:46,533 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1080 sec 2024-11-20T13:25:46,535 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 1.1140 sec 2024-11-20T13:25:46,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:46,690 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T13:25:46,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:25:46,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:46,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:25:46,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:46,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:25:46,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:46,697 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/08adb0962e234fad95573cf8d13e5719 is 50, key is test_row_0/A:col10/1732109146061/Put/seqid=0 2024-11-20T13:25:46,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742307_1483 (size=16681) 2024-11-20T13:25:46,703 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/08adb0962e234fad95573cf8d13e5719 2024-11-20T13:25:46,710 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/3db8ec8a39514384ad2b5b24a7c70e23 is 50, key is test_row_0/B:col10/1732109146061/Put/seqid=0 2024-11-20T13:25:46,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to 
blk_1073742308_1484 (size=12001) 2024-11-20T13:25:46,748 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109206742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,748 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109206742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109206742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109206743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109206743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109206849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109206849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109206850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109206850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:46,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:46,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109206850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:47,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109207056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:47,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:47,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109207056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109207057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:47,062 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:47,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109207057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109207057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,128 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/3db8ec8a39514384ad2b5b24a7c70e23 2024-11-20T13:25:47,136 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/67bb5e5857884960aff3f5ec272abce4 is 50, key is 
test_row_0/C:col10/1732109146061/Put/seqid=0 2024-11-20T13:25:47,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742309_1485 (size=12001) 2024-11-20T13:25:47,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:47,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109207363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:47,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109207363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:47,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109207364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:47,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109207364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:47,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109207365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T13:25:47,526 INFO [Thread-2128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-20T13:25:47,528 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:25:47,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-20T13:25:47,530 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:25:47,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T13:25:47,530 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:25:47,530 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:25:47,542 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/67bb5e5857884960aff3f5ec272abce4 2024-11-20T13:25:47,547 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/08adb0962e234fad95573cf8d13e5719 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/08adb0962e234fad95573cf8d13e5719 2024-11-20T13:25:47,551 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/08adb0962e234fad95573cf8d13e5719, entries=250, sequenceid=56, filesize=16.3 K 2024-11-20T13:25:47,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/3db8ec8a39514384ad2b5b24a7c70e23 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/3db8ec8a39514384ad2b5b24a7c70e23 2024-11-20T13:25:47,555 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/3db8ec8a39514384ad2b5b24a7c70e23, entries=150, sequenceid=56, filesize=11.7 K 2024-11-20T13:25:47,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/67bb5e5857884960aff3f5ec272abce4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/67bb5e5857884960aff3f5ec272abce4 2024-11-20T13:25:47,567 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/67bb5e5857884960aff3f5ec272abce4, entries=150, sequenceid=56, filesize=11.7 K 2024-11-20T13:25:47,580 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-20T13:25:47,580 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-20T13:25:47,581 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 24db988c4fa8e1a0b1451e8c68b68697 in 891ms, sequenceid=56, compaction requested=true 2024-11-20T13:25:47,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:47,582 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:47,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:25:47,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:47,582 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:47,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:25:47,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:47,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:25:47,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:47,590 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:47,591 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/A is initiating minor compaction (all files) 2024-11-20T13:25:47,591 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/A in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
2024-11-20T13:25:47,591 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/7fd46ae62fdb4525b31116511aa09b3b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/6881109040984ec484332e67d3905073, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/08adb0962e234fad95573cf8d13e5719] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=39.7 K 2024-11-20T13:25:47,591 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:47,591 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/B is initiating minor compaction (all files) 2024-11-20T13:25:47,591 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/B in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:47,591 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/9f51919d0c1e41748bb1fc2df04a0c92, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/1e8592d9d2ae4187a06c517e95bee0e4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/3db8ec8a39514384ad2b5b24a7c70e23] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=35.2 K 2024-11-20T13:25:47,591 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7fd46ae62fdb4525b31116511aa09b3b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732109145421 2024-11-20T13:25:47,592 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f51919d0c1e41748bb1fc2df04a0c92, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732109145421 2024-11-20T13:25:47,592 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6881109040984ec484332e67d3905073, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732109145436 2024-11-20T13:25:47,592 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e8592d9d2ae4187a06c517e95bee0e4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732109145436 2024-11-20T13:25:47,593 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 08adb0962e234fad95573cf8d13e5719, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732109146061 2024-11-20T13:25:47,593 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 3db8ec8a39514384ad2b5b24a7c70e23, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732109146061 2024-11-20T13:25:47,610 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#B#compaction#402 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:47,611 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/4c934287fcaf4e20998d2db860f3eb6e is 50, key is test_row_0/B:col10/1732109146061/Put/seqid=0 2024-11-20T13:25:47,620 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#A#compaction#403 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:47,621 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/c2d15be7dacc49a6bc328890fea2ea17 is 50, key is test_row_0/A:col10/1732109146061/Put/seqid=0 2024-11-20T13:25:47,624 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T13:25:47,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T13:25:47,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742310_1486 (size=12104) 2024-11-20T13:25:47,668 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/4c934287fcaf4e20998d2db860f3eb6e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/4c934287fcaf4e20998d2db860f3eb6e 2024-11-20T13:25:47,674 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/B of 24db988c4fa8e1a0b1451e8c68b68697 into 4c934287fcaf4e20998d2db860f3eb6e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:25:47,674 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:47,674 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/B, priority=13, startTime=1732109147582; duration=0sec 2024-11-20T13:25:47,674 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:47,674 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:B 2024-11-20T13:25:47,674 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:47,675 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:47,675 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/C is initiating minor compaction (all files) 2024-11-20T13:25:47,675 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/C in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:47,675 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/f48be3db5b8141e8bda297d80bed7dfa, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/dd6571704e494a0d877639e50c0088be, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/67bb5e5857884960aff3f5ec272abce4] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=35.2 K 2024-11-20T13:25:47,676 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting f48be3db5b8141e8bda297d80bed7dfa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732109145421 2024-11-20T13:25:47,677 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting dd6571704e494a0d877639e50c0088be, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732109145436 2024-11-20T13:25:47,677 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 67bb5e5857884960aff3f5ec272abce4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732109146061 2024-11-20T13:25:47,683 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,683 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T13:25:47,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:47,684 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T13:25:47,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:25:47,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:47,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:25:47,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:47,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:25:47,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:47,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742311_1487 (size=12104) 2024-11-20T13:25:47,694 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/c2d15be7dacc49a6bc328890fea2ea17 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/c2d15be7dacc49a6bc328890fea2ea17 2024-11-20T13:25:47,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/2306082589a047e69f9cfadabeb5b041 is 50, key is test_row_0/A:col10/1732109146742/Put/seqid=0 2024-11-20T13:25:47,701 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/A of 24db988c4fa8e1a0b1451e8c68b68697 into c2d15be7dacc49a6bc328890fea2ea17(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:25:47,701 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:47,701 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/A, priority=13, startTime=1732109147581; duration=0sec 2024-11-20T13:25:47,701 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:47,701 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:A 2024-11-20T13:25:47,706 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#C#compaction#405 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:47,706 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/d8a43a68c16246eaae422c0e870e9e61 is 50, key is test_row_0/C:col10/1732109146061/Put/seqid=0 2024-11-20T13:25:47,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742313_1489 (size=12104) 2024-11-20T13:25:47,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742312_1488 (size=12001) 2024-11-20T13:25:47,752 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/d8a43a68c16246eaae422c0e870e9e61 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/d8a43a68c16246eaae422c0e870e9e61 2024-11-20T13:25:47,758 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/C of 24db988c4fa8e1a0b1451e8c68b68697 into d8a43a68c16246eaae422c0e870e9e61(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:25:47,758 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:47,758 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/C, priority=13, startTime=1732109147582; duration=0sec 2024-11-20T13:25:47,758 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:47,758 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:C 2024-11-20T13:25:47,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T13:25:47,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:47,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:47,886 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:47,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:47,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109207881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109207882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:47,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109207882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:47,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109207883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:47,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109207887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:47,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:47,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109207988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109207988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:47,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:47,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109207988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109207988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:47,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:47,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109207991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:48,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T13:25:48,139 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/2306082589a047e69f9cfadabeb5b041 2024-11-20T13:25:48,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/4c8c2c92563b491e8f6820ad2c9ffdda is 50, key is test_row_0/B:col10/1732109146742/Put/seqid=0 2024-11-20T13:25:48,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742314_1490 (size=12001) 2024-11-20T13:25:48,163 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/4c8c2c92563b491e8f6820ad2c9ffdda 2024-11-20T13:25:48,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/bcf6a7ceca2d4f89b6a966eb0b596ea8 is 50, key is test_row_0/C:col10/1732109146742/Put/seqid=0 2024-11-20T13:25:48,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742315_1491 (size=12001) 2024-11-20T13:25:48,179 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=76 (bloomFilter=true), 
to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/bcf6a7ceca2d4f89b6a966eb0b596ea8 2024-11-20T13:25:48,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/2306082589a047e69f9cfadabeb5b041 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2306082589a047e69f9cfadabeb5b041 2024-11-20T13:25:48,188 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2306082589a047e69f9cfadabeb5b041, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T13:25:48,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/4c8c2c92563b491e8f6820ad2c9ffdda as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/4c8c2c92563b491e8f6820ad2c9ffdda 2024-11-20T13:25:48,195 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/4c8c2c92563b491e8f6820ad2c9ffdda, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T13:25:48,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/bcf6a7ceca2d4f89b6a966eb0b596ea8 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/bcf6a7ceca2d4f89b6a966eb0b596ea8 2024-11-20T13:25:48,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:48,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109208194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:48,198 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:48,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109208194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:48,198 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:48,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109208195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:48,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:48,200 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/bcf6a7ceca2d4f89b6a966eb0b596ea8, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T13:25:48,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109208195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:48,201 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 24db988c4fa8e1a0b1451e8c68b68697 in 518ms, sequenceid=76, compaction requested=false 2024-11-20T13:25:48,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:48,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
2024-11-20T13:25:48,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133
2024-11-20T13:25:48,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697
2024-11-20T13:25:48,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=133
2024-11-20T13:25:48,201 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB
2024-11-20T13:25:48,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A
2024-11-20T13:25:48,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T13:25:48,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B
2024-11-20T13:25:48,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T13:25:48,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C
2024-11-20T13:25:48,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T13:25:48,203 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132
2024-11-20T13:25:48,203 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 672 msec
2024-11-20T13:25:48,205 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 676 msec
2024-11-20T13:25:48,206 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/3d064d21581a4f2f9dafe1c8b1ef5040 is 50, key is test_row_0/A:col10/1732109148200/Put/seqid=0
2024-11-20T13:25:48,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742316_1492 (size=14341)
2024-11-20T13:25:48,211 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/3d064d21581a4f2f9dafe1c8b1ef5040
2024-11-20T13:25:48,219 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/7d9ac50e8f66461fae633b150ffdc9ed is 50, key is test_row_0/B:col10/1732109148200/Put/seqid=0
2024-11-20T13:25:48,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742317_1493
(size=12001) 2024-11-20T13:25:48,223 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/7d9ac50e8f66461fae633b150ffdc9ed 2024-11-20T13:25:48,228 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/dd508aab823846759432be4a2a254354 is 50, key is test_row_0/C:col10/1732109148200/Put/seqid=0 2024-11-20T13:25:48,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742318_1494 (size=12001) 2024-11-20T13:25:48,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:48,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109208291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:48,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:48,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109208400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:48,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:48,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109208500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:48,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:48,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109208500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:48,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:48,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109208501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:48,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:48,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109208502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:48,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:48,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109208604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:48,632 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/dd508aab823846759432be4a2a254354 2024-11-20T13:25:48,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T13:25:48,633 INFO [Thread-2128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-20T13:25:48,634 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:25:48,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-20T13:25:48,636 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:25:48,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T13:25:48,636 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:25:48,637 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:25:48,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/3d064d21581a4f2f9dafe1c8b1ef5040 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/3d064d21581a4f2f9dafe1c8b1ef5040 2024-11-20T13:25:48,648 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/3d064d21581a4f2f9dafe1c8b1ef5040, entries=200, sequenceid=96, filesize=14.0 K 2024-11-20T13:25:48,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/7d9ac50e8f66461fae633b150ffdc9ed as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/7d9ac50e8f66461fae633b150ffdc9ed 2024-11-20T13:25:48,653 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/7d9ac50e8f66461fae633b150ffdc9ed, entries=150, sequenceid=96, filesize=11.7 K 2024-11-20T13:25:48,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/dd508aab823846759432be4a2a254354 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/dd508aab823846759432be4a2a254354 2024-11-20T13:25:48,657 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/dd508aab823846759432be4a2a254354, entries=150, sequenceid=96, filesize=11.7 K 2024-11-20T13:25:48,658 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 24db988c4fa8e1a0b1451e8c68b68697 in 457ms, sequenceid=96, compaction requested=true 2024-11-20T13:25:48,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:48,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:25:48,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:48,659 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:48,659 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:48,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:25:48,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
2024-11-20T13:25:48,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:C, priority=-2147483648, current under compaction store size is 3
2024-11-20T13:25:48,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-20T13:25:48,660 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T13:25:48,660 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/A is initiating minor compaction (all files)
2024-11-20T13:25:48,660 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/A in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.
2024-11-20T13:25:48,660 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/c2d15be7dacc49a6bc328890fea2ea17, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2306082589a047e69f9cfadabeb5b041, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/3d064d21581a4f2f9dafe1c8b1ef5040] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=37.5 K
2024-11-20T13:25:48,660 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T13:25:48,660 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/B is initiating minor compaction (all files)
2024-11-20T13:25:48,660 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/B in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.
2024-11-20T13:25:48,660 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/4c934287fcaf4e20998d2db860f3eb6e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/4c8c2c92563b491e8f6820ad2c9ffdda, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/7d9ac50e8f66461fae633b150ffdc9ed] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=35.3 K 2024-11-20T13:25:48,661 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c934287fcaf4e20998d2db860f3eb6e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732109146061 2024-11-20T13:25:48,661 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting c2d15be7dacc49a6bc328890fea2ea17, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732109146061 2024-11-20T13:25:48,661 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c8c2c92563b491e8f6820ad2c9ffdda, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732109146707 2024-11-20T13:25:48,661 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2306082589a047e69f9cfadabeb5b041, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732109146707 2024-11-20T13:25:48,661 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d9ac50e8f66461fae633b150ffdc9ed, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732109147881 2024-11-20T13:25:48,662 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d064d21581a4f2f9dafe1c8b1ef5040, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732109147881 2024-11-20T13:25:48,671 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#A#compaction#411 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:48,671 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/d3d948a3f3124ce6a4c625585d6f18d7 is 50, key is test_row_0/A:col10/1732109148200/Put/seqid=0 2024-11-20T13:25:48,675 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#B#compaction#412 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:48,676 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/e5ff809939c9404aaf37a59e0b5ed2d0 is 50, key is test_row_0/B:col10/1732109148200/Put/seqid=0 2024-11-20T13:25:48,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742319_1495 (size=12207) 2024-11-20T13:25:48,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742320_1496 (size=12207) 2024-11-20T13:25:48,696 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/d3d948a3f3124ce6a4c625585d6f18d7 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/d3d948a3f3124ce6a4c625585d6f18d7 2024-11-20T13:25:48,699 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/e5ff809939c9404aaf37a59e0b5ed2d0 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/e5ff809939c9404aaf37a59e0b5ed2d0 2024-11-20T13:25:48,705 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/A of 24db988c4fa8e1a0b1451e8c68b68697 into d3d948a3f3124ce6a4c625585d6f18d7(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:25:48,706 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697:
2024-11-20T13:25:48,706 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/A, priority=13, startTime=1732109148658; duration=0sec
2024-11-20T13:25:48,706 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-20T13:25:48,706 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:A
2024-11-20T13:25:48,706 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T13:25:48,707 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T13:25:48,707 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/B of 24db988c4fa8e1a0b1451e8c68b68697 into e5ff809939c9404aaf37a59e0b5ed2d0(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T13:25:48,707 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/C is initiating minor compaction (all files)
2024-11-20T13:25:48,707 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697:
2024-11-20T13:25:48,707 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/C in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.
2024-11-20T13:25:48,707 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/B, priority=13, startTime=1732109148659; duration=0sec 2024-11-20T13:25:48,707 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/d8a43a68c16246eaae422c0e870e9e61, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/bcf6a7ceca2d4f89b6a966eb0b596ea8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/dd508aab823846759432be4a2a254354] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=35.3 K 2024-11-20T13:25:48,707 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:48,708 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:B 2024-11-20T13:25:48,709 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8a43a68c16246eaae422c0e870e9e61, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732109146061 2024-11-20T13:25:48,709 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting bcf6a7ceca2d4f89b6a966eb0b596ea8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732109146707 2024-11-20T13:25:48,709 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd508aab823846759432be4a2a254354, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732109147881 2024-11-20T13:25:48,717 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#C#compaction#413 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T13:25:48,717 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/2342bb2b7c50446d9be5d58f6b07caa3 is 50, key is test_row_0/C:col10/1732109148200/Put/seqid=0
2024-11-20T13:25:48,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742321_1497 (size=12207)
2024-11-20T13:25:48,735 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/2342bb2b7c50446d9be5d58f6b07caa3 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/2342bb2b7c50446d9be5d58f6b07caa3
2024-11-20T13:25:48,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134
2024-11-20T13:25:48,741 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/C of 24db988c4fa8e1a0b1451e8c68b68697 into 2342bb2b7c50446d9be5d58f6b07caa3(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T13:25:48,741 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697:
2024-11-20T13:25:48,741 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/C, priority=13, startTime=1732109148659; duration=0sec
2024-11-20T13:25:48,741 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T13:25:48,741 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:C
2024-11-20T13:25:48,788 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:48,789 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135
2024-11-20T13:25:48,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.
2024-11-20T13:25:48,789 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T13:25:48,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:25:48,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:48,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:25:48,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:48,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:25:48,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:48,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/2abc462393dc49eea34ac25eab9a91f8 is 50, key is test_row_0/A:col10/1732109148277/Put/seqid=0 2024-11-20T13:25:48,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742322_1498 (size=12001) 2024-11-20T13:25:48,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:48,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:48,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T13:25:48,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:48,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109208976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:49,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:49,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109209004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:49,008 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:49,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109209006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:49,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:49,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109209009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:49,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:49,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109209009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:49,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:49,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109209080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:49,210 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/2abc462393dc49eea34ac25eab9a91f8 2024-11-20T13:25:49,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/37b0c0001b504b508e4fd37261c11775 is 50, key is test_row_0/B:col10/1732109148277/Put/seqid=0 2024-11-20T13:25:49,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742323_1499 (size=12001) 2024-11-20T13:25:49,230 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/37b0c0001b504b508e4fd37261c11775 2024-11-20T13:25:49,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/12a511d6e2d8486b8101467303a71cbd is 50, key is test_row_0/C:col10/1732109148277/Put/seqid=0 2024-11-20T13:25:49,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T13:25:49,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742324_1500 (size=12001) 2024-11-20T13:25:49,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:49,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109209285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:49,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:49,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109209587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:49,646 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/12a511d6e2d8486b8101467303a71cbd 2024-11-20T13:25:49,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/2abc462393dc49eea34ac25eab9a91f8 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2abc462393dc49eea34ac25eab9a91f8 2024-11-20T13:25:49,655 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2abc462393dc49eea34ac25eab9a91f8, entries=150, sequenceid=117, filesize=11.7 K 2024-11-20T13:25:49,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/37b0c0001b504b508e4fd37261c11775 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/37b0c0001b504b508e4fd37261c11775 2024-11-20T13:25:49,660 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/37b0c0001b504b508e4fd37261c11775, entries=150, sequenceid=117, filesize=11.7 K 2024-11-20T13:25:49,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/12a511d6e2d8486b8101467303a71cbd as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/12a511d6e2d8486b8101467303a71cbd 2024-11-20T13:25:49,665 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/12a511d6e2d8486b8101467303a71cbd, entries=150, sequenceid=117, filesize=11.7 K 2024-11-20T13:25:49,666 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 24db988c4fa8e1a0b1451e8c68b68697 in 877ms, sequenceid=117, compaction requested=false 2024-11-20T13:25:49,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:49,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:49,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-20T13:25:49,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-20T13:25:49,668 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-20T13:25:49,668 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0300 sec 2024-11-20T13:25:49,670 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.0350 sec 2024-11-20T13:25:49,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T13:25:49,740 INFO [Thread-2128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-20T13:25:49,741 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:25:49,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-20T13:25:49,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T13:25:49,743 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=136, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:25:49,744 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:25:49,744 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:25:49,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T13:25:49,895 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:49,896 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T13:25:49,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:49,896 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T13:25:49,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:25:49,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:49,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:25:49,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:49,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:25:49,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:49,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/1c906134a59d4e7faf252754fe118ab9 is 50, key is test_row_0/A:col10/1732109148975/Put/seqid=0 2024-11-20T13:25:49,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34799 is added to blk_1073742325_1501 (size=12151) 2024-11-20T13:25:50,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:50,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:50,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T13:25:50,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:50,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:50,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109210038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:50,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109210040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:50,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:50,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109210046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:50,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:50,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109210048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:50,095 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:50,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109210092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:50,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:50,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109210148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:50,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:50,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109210149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:50,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:50,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109210156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:50,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:50,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109210157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:50,307 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/1c906134a59d4e7faf252754fe118ab9 2024-11-20T13:25:50,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/80353f5555c4421d9137e3da98208d95 is 50, key is test_row_0/B:col10/1732109148975/Put/seqid=0 2024-11-20T13:25:50,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742326_1502 (size=12151) 2024-11-20T13:25:50,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T13:25:50,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:50,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109210355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:50,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:50,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109210356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:50,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:50,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109210361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:50,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:50,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109210361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:50,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:50,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109210660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:50,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:50,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109210660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:50,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:50,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109210669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:50,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:50,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109210669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:50,719 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/80353f5555c4421d9137e3da98208d95 2024-11-20T13:25:50,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/3545c9d5c9f345fc9b03ab1a80f43bec is 50, key is test_row_0/C:col10/1732109148975/Put/seqid=0 2024-11-20T13:25:50,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742327_1503 (size=12151) 2024-11-20T13:25:50,735 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/3545c9d5c9f345fc9b03ab1a80f43bec 2024-11-20T13:25:50,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/1c906134a59d4e7faf252754fe118ab9 as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/1c906134a59d4e7faf252754fe118ab9 2024-11-20T13:25:50,743 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/1c906134a59d4e7faf252754fe118ab9, entries=150, sequenceid=135, filesize=11.9 K 2024-11-20T13:25:50,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/80353f5555c4421d9137e3da98208d95 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/80353f5555c4421d9137e3da98208d95 2024-11-20T13:25:50,747 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/80353f5555c4421d9137e3da98208d95, entries=150, sequenceid=135, filesize=11.9 K 2024-11-20T13:25:50,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/3545c9d5c9f345fc9b03ab1a80f43bec as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/3545c9d5c9f345fc9b03ab1a80f43bec 2024-11-20T13:25:50,752 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/3545c9d5c9f345fc9b03ab1a80f43bec, entries=150, sequenceid=135, filesize=11.9 K 2024-11-20T13:25:50,753 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 24db988c4fa8e1a0b1451e8c68b68697 in 856ms, sequenceid=135, compaction requested=true 2024-11-20T13:25:50,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:50,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
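The RegionTooBusyException entries above are the region server refusing writes in HRegion.checkResources while the region's memstore is over its blocking limit; writers are expected to back off and retry until a flush (such as the one committed above) frees space. Purely as an illustration, and not part of this test, the following is a minimal Java sketch of an explicit retry loop around a single Put. The row, family, and qualifier are copied from the keys in the log, the value and retry bounds are assumptions, and in practice the HBase client already retries these failures internally according to hbase.client.retries.number and hbase.client.pause, possibly surfacing them wrapped in a RetriesExhaustedException.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some-value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break; // accepted once the memstore has drained below the blocking limit
        } catch (IOException e) {
          // The busy signal may arrive directly or wrapped by the client's own retry
          // machinery, so walk the cause chain before deciding whether to retry here.
          boolean busy = false;
          for (Throwable t = e; t != null; t = t.getCause()) {
            if (t instanceof RegionTooBusyException) {
              busy = true;
              break;
            }
          }
          if (!busy || attempt >= 10) {
            throw e; // not a busy-region failure, or retries exhausted
          }
          Thread.sleep(backoffMs);          // back off while the region flushes
          backoffMs = Math.min(backoffMs * 2, 5000L);
        }
      }
    }
  }
}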
2024-11-20T13:25:50,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-20T13:25:50,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-20T13:25:50,755 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-20T13:25:50,755 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0100 sec 2024-11-20T13:25:50,757 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.0140 sec 2024-11-20T13:25:50,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T13:25:50,846 INFO [Thread-2128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-20T13:25:50,848 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:25:50,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-20T13:25:50,849 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:25:50,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T13:25:50,850 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:25:50,850 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:25:50,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T13:25:51,002 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:51,003 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T13:25:51,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
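The pid=136 through pid=139 procedures above are the server side of an admin-requested flush (the log records "Operation: FLUSH, Table Name: default:TestAcidGuarantees"): the master stores a FlushTableProcedure, spawns a FlushRegionProcedure subprocedure per region, dispatches it to the region server, and the client polls MasterRpcServices until the procedure is done. A minimal sketch of issuing the same kind of request from client code, assuming only the table name taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; in this build that shows up
      // as a FlushTableProcedure with one FlushRegionProcedure per region, as in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}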
2024-11-20T13:25:51,003 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T13:25:51,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:25:51,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:51,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:25:51,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:51,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:25:51,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:51,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/d6be1c2c892b443e92aaae103294ed1c is 50, key is test_row_0/A:col10/1732109150038/Put/seqid=0 2024-11-20T13:25:51,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742328_1504 (size=12151) 2024-11-20T13:25:51,017 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/d6be1c2c892b443e92aaae103294ed1c 2024-11-20T13:25:51,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/47e2ff099e244fa6b8bca49b42874bdc is 50, key is test_row_0/B:col10/1732109150038/Put/seqid=0 2024-11-20T13:25:51,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742329_1505 (size=12151) 2024-11-20T13:25:51,032 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=154 (bloomFilter=true), 
to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/47e2ff099e244fa6b8bca49b42874bdc 2024-11-20T13:25:51,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/a2d3450c64a146b1bb32f47119a15df9 is 50, key is test_row_0/C:col10/1732109150038/Put/seqid=0 2024-11-20T13:25:51,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742330_1506 (size=12151) 2024-11-20T13:25:51,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:51,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:51,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T13:25:51,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:51,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109211171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:51,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:51,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109211171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:51,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:51,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109211173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:51,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:51,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109211177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:51,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:51,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109211177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:51,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:51,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109211280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:51,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:51,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109211284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:51,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:51,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109211284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:51,442 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/a2d3450c64a146b1bb32f47119a15df9 2024-11-20T13:25:51,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/d6be1c2c892b443e92aaae103294ed1c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/d6be1c2c892b443e92aaae103294ed1c 2024-11-20T13:25:51,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T13:25:51,455 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/d6be1c2c892b443e92aaae103294ed1c, entries=150, sequenceid=154, filesize=11.9 K 2024-11-20T13:25:51,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/47e2ff099e244fa6b8bca49b42874bdc as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/47e2ff099e244fa6b8bca49b42874bdc 2024-11-20T13:25:51,462 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/47e2ff099e244fa6b8bca49b42874bdc, entries=150, sequenceid=154, filesize=11.9 K 2024-11-20T13:25:51,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/a2d3450c64a146b1bb32f47119a15df9 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/a2d3450c64a146b1bb32f47119a15df9 2024-11-20T13:25:51,466 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/a2d3450c64a146b1bb32f47119a15df9, entries=150, sequenceid=154, filesize=11.9 K 2024-11-20T13:25:51,466 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 24db988c4fa8e1a0b1451e8c68b68697 in 463ms, sequenceid=154, compaction requested=true 2024-11-20T13:25:51,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:51,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:51,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-20T13:25:51,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-20T13:25:51,472 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-20T13:25:51,472 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 618 msec 2024-11-20T13:25:51,475 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 626 msec 2024-11-20T13:25:51,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:51,493 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T13:25:51,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:25:51,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:51,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:25:51,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:51,495 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:25:51,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:51,499 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/fbc47ce021b7450684f8c72202cf668d is 50, key is test_row_0/A:col10/1732109151492/Put/seqid=0 2024-11-20T13:25:51,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742331_1507 (size=16931) 2024-11-20T13:25:51,525 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/fbc47ce021b7450684f8c72202cf668d 2024-11-20T13:25:51,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:51,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109211527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:51,534 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/0a3c01beae564409a222ad584c1d9573 is 50, key is test_row_0/B:col10/1732109151492/Put/seqid=0 2024-11-20T13:25:51,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:51,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109211528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:51,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742332_1508 (size=12151) 2024-11-20T13:25:51,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:51,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109211529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:51,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:51,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109211635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:51,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:51,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109211635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:51,645 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:51,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109211639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:51,841 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:51,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109211838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:51,841 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:51,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109211838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:51,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:51,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109211846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:51,939 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/0a3c01beae564409a222ad584c1d9573 2024-11-20T13:25:51,946 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/19a1fab2bb6e4c01a5b92c64bfda95b0 is 50, key is test_row_0/C:col10/1732109151492/Put/seqid=0 2024-11-20T13:25:51,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742333_1509 (size=12151) 2024-11-20T13:25:51,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T13:25:51,953 INFO [Thread-2128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-20T13:25:51,954 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:25:51,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-20T13:25:51,956 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:25:51,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T13:25:51,956 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:25:51,956 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
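The repeated "Over memstore limit=512.0 K" rejections come from HRegion.checkResources, which blocks updates once a region's memstore grows past hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The values this test actually uses are not visible in this excerpt; as an assumption for illustration only, a 128 K flush size with the default multiplier of 4 would yield the 512 K limit seen here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed flush size of 128 KB, chosen only to reproduce the 512 K blocking limit
    // seen in this log; the real test configuration is not shown in this excerpt.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Writes are rejected with RegionTooBusyException once a region's memstore reaches
    // flush.size * block.multiplier (4 is the HBase default).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking limit = " + blockingLimit + " bytes"); // 524288 bytes = 512 K
  }
}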
2024-11-20T13:25:52,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T13:25:52,108 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:52,109 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T13:25:52,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:52,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:52,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:52,112 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:52,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:52,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:52,147 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:52,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109212143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:52,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:52,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109212145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:52,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:52,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109212152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:52,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:52,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109212178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:52,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:52,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109212179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:52,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T13:25:52,265 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:52,266 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T13:25:52,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:52,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:52,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:52,266 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:52,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:52,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:52,351 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/19a1fab2bb6e4c01a5b92c64bfda95b0 2024-11-20T13:25:52,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/fbc47ce021b7450684f8c72202cf668d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/fbc47ce021b7450684f8c72202cf668d 2024-11-20T13:25:52,361 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/fbc47ce021b7450684f8c72202cf668d, entries=250, sequenceid=173, filesize=16.5 K 2024-11-20T13:25:52,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/0a3c01beae564409a222ad584c1d9573 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/0a3c01beae564409a222ad584c1d9573 2024-11-20T13:25:52,367 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/0a3c01beae564409a222ad584c1d9573, entries=150, 
sequenceid=173, filesize=11.9 K 2024-11-20T13:25:52,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/19a1fab2bb6e4c01a5b92c64bfda95b0 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/19a1fab2bb6e4c01a5b92c64bfda95b0 2024-11-20T13:25:52,370 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/19a1fab2bb6e4c01a5b92c64bfda95b0, entries=150, sequenceid=173, filesize=11.9 K 2024-11-20T13:25:52,371 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 24db988c4fa8e1a0b1451e8c68b68697 in 878ms, sequenceid=173, compaction requested=true 2024-11-20T13:25:52,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:52,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:25:52,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:52,372 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T13:25:52,372 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T13:25:52,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:25:52,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:52,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:25:52,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:52,373 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60661 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T13:25:52,373 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 65441 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T13:25:52,373 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 
24db988c4fa8e1a0b1451e8c68b68697/B is initiating minor compaction (all files) 2024-11-20T13:25:52,373 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/A is initiating minor compaction (all files) 2024-11-20T13:25:52,373 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/A in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:52,373 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/B in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:52,373 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/d3d948a3f3124ce6a4c625585d6f18d7, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2abc462393dc49eea34ac25eab9a91f8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/1c906134a59d4e7faf252754fe118ab9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/d6be1c2c892b443e92aaae103294ed1c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/fbc47ce021b7450684f8c72202cf668d] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=63.9 K 2024-11-20T13:25:52,373 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/e5ff809939c9404aaf37a59e0b5ed2d0, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/37b0c0001b504b508e4fd37261c11775, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/80353f5555c4421d9137e3da98208d95, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/47e2ff099e244fa6b8bca49b42874bdc, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/0a3c01beae564409a222ad584c1d9573] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=59.2 K 2024-11-20T13:25:52,374 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3d948a3f3124ce6a4c625585d6f18d7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732109147881 2024-11-20T13:25:52,374 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] 
compactions.Compactor(224): Compacting e5ff809939c9404aaf37a59e0b5ed2d0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732109147881 2024-11-20T13:25:52,374 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2abc462393dc49eea34ac25eab9a91f8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732109148277 2024-11-20T13:25:52,374 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 37b0c0001b504b508e4fd37261c11775, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732109148277 2024-11-20T13:25:52,375 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c906134a59d4e7faf252754fe118ab9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732109148959 2024-11-20T13:25:52,375 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 80353f5555c4421d9137e3da98208d95, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732109148959 2024-11-20T13:25:52,375 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 47e2ff099e244fa6b8bca49b42874bdc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732109150038 2024-11-20T13:25:52,375 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting d6be1c2c892b443e92aaae103294ed1c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732109150038 2024-11-20T13:25:52,375 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting fbc47ce021b7450684f8c72202cf668d, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732109151159 2024-11-20T13:25:52,376 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a3c01beae564409a222ad584c1d9573, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732109151176 2024-11-20T13:25:52,396 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#A#compaction#426 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:52,397 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/1cc9a44def8449a3b1ff04bb3f921700 is 50, key is test_row_0/A:col10/1732109151492/Put/seqid=0 2024-11-20T13:25:52,413 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#B#compaction#427 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:52,414 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/b5b9637b29f44e3b98cdf3ec08bff2af is 50, key is test_row_0/B:col10/1732109151492/Put/seqid=0 2024-11-20T13:25:52,419 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:52,420 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T13:25:52,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:52,420 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T13:25:52,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:25:52,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:52,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:25:52,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:52,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:25:52,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:52,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742334_1510 (size=12527) 2024-11-20T13:25:52,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/e0c3fc96472c4e058da5421b73ccb779 is 50, key is test_row_0/A:col10/1732109151524/Put/seqid=0 2024-11-20T13:25:52,450 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/1cc9a44def8449a3b1ff04bb3f921700 as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/1cc9a44def8449a3b1ff04bb3f921700 2024-11-20T13:25:52,456 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/A of 24db988c4fa8e1a0b1451e8c68b68697 into 1cc9a44def8449a3b1ff04bb3f921700(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:25:52,457 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:52,457 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/A, priority=11, startTime=1732109152371; duration=0sec 2024-11-20T13:25:52,457 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:52,457 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:A 2024-11-20T13:25:52,457 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T13:25:52,459 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60661 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T13:25:52,459 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/C is initiating minor compaction (all files) 2024-11-20T13:25:52,459 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/C in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
2024-11-20T13:25:52,459 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/2342bb2b7c50446d9be5d58f6b07caa3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/12a511d6e2d8486b8101467303a71cbd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/3545c9d5c9f345fc9b03ab1a80f43bec, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/a2d3450c64a146b1bb32f47119a15df9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/19a1fab2bb6e4c01a5b92c64bfda95b0] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=59.2 K 2024-11-20T13:25:52,459 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2342bb2b7c50446d9be5d58f6b07caa3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732109147881 2024-11-20T13:25:52,460 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 12a511d6e2d8486b8101467303a71cbd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732109148277 2024-11-20T13:25:52,460 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3545c9d5c9f345fc9b03ab1a80f43bec, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732109148959 2024-11-20T13:25:52,461 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting a2d3450c64a146b1bb32f47119a15df9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732109150038 2024-11-20T13:25:52,461 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19a1fab2bb6e4c01a5b92c64bfda95b0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732109151176 2024-11-20T13:25:52,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742335_1511 (size=12527) 2024-11-20T13:25:52,492 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#C#compaction#429 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:52,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742336_1512 (size=12151) 2024-11-20T13:25:52,494 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/b5b9637b29f44e3b98cdf3ec08bff2af as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/b5b9637b29f44e3b98cdf3ec08bff2af 2024-11-20T13:25:52,493 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/84d813dee8f8465c90dcb71129140f8c is 50, key is test_row_0/C:col10/1732109151492/Put/seqid=0 2024-11-20T13:25:52,496 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=190 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/e0c3fc96472c4e058da5421b73ccb779 2024-11-20T13:25:52,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/19013e59355b49bc90cf16add05a1a27 is 50, key is test_row_0/B:col10/1732109151524/Put/seqid=0 2024-11-20T13:25:52,507 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/B of 24db988c4fa8e1a0b1451e8c68b68697 into b5b9637b29f44e3b98cdf3ec08bff2af(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:25:52,507 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:52,507 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/B, priority=11, startTime=1732109152372; duration=0sec 2024-11-20T13:25:52,507 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:52,507 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:B 2024-11-20T13:25:52,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742337_1513 (size=12527) 2024-11-20T13:25:52,519 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/84d813dee8f8465c90dcb71129140f8c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/84d813dee8f8465c90dcb71129140f8c 2024-11-20T13:25:52,531 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/C of 24db988c4fa8e1a0b1451e8c68b68697 into 84d813dee8f8465c90dcb71129140f8c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:25:52,531 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:52,531 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/C, priority=11, startTime=1732109152372; duration=0sec 2024-11-20T13:25:52,531 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:52,531 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:C 2024-11-20T13:25:52,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742338_1514 (size=12151) 2024-11-20T13:25:52,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T13:25:52,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:52,651 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
as already flushing 2024-11-20T13:25:52,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:52,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109212684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:52,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:52,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109212685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:52,692 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:52,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109212685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:52,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:52,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109212791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:52,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:52,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109212791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:52,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:52,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109212794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:52,943 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=190 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/19013e59355b49bc90cf16add05a1a27 2024-11-20T13:25:52,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/2e3bc0e0f1a948fda62731ac8c2eb295 is 50, key is test_row_0/C:col10/1732109151524/Put/seqid=0 2024-11-20T13:25:52,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742339_1515 (size=12151) 2024-11-20T13:25:52,966 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=190 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/2e3bc0e0f1a948fda62731ac8c2eb295 2024-11-20T13:25:52,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/e0c3fc96472c4e058da5421b73ccb779 as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/e0c3fc96472c4e058da5421b73ccb779 2024-11-20T13:25:52,975 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/e0c3fc96472c4e058da5421b73ccb779, entries=150, sequenceid=190, filesize=11.9 K 2024-11-20T13:25:52,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/19013e59355b49bc90cf16add05a1a27 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/19013e59355b49bc90cf16add05a1a27 2024-11-20T13:25:52,979 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/19013e59355b49bc90cf16add05a1a27, entries=150, sequenceid=190, filesize=11.9 K 2024-11-20T13:25:52,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-20T13:25:52,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/2e3bc0e0f1a948fda62731ac8c2eb295 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/2e3bc0e0f1a948fda62731ac8c2eb295 2024-11-20T13:25:52,984 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/2e3bc0e0f1a948fda62731ac8c2eb295, entries=150, sequenceid=190, filesize=11.9 K 2024-11-20T13:25:52,985 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 24db988c4fa8e1a0b1451e8c68b68697 in 565ms, sequenceid=190, compaction requested=false 2024-11-20T13:25:52,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:52,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
2024-11-20T13:25:52,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-20T13:25:52,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-20T13:25:52,988 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-20T13:25:52,988 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0300 sec 2024-11-20T13:25:52,989 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.0340 sec 2024-11-20T13:25:53,000 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T13:25:53,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:25:53,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:53,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:25:53,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:53,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:25:53,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:53,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:53,005 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/08339bdae526402f9ec3a36a78067a15 is 50, key is test_row_0/A:col10/1732109152999/Put/seqid=0 2024-11-20T13:25:53,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742340_1516 (size=14541) 2024-11-20T13:25:53,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/08339bdae526402f9ec3a36a78067a15 2024-11-20T13:25:53,019 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/2405a5dc4a95406599220981b83f47d3 is 50, key is test_row_0/B:col10/1732109152999/Put/seqid=0 2024-11-20T13:25:53,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to 
blk_1073742341_1517 (size=12151) 2024-11-20T13:25:53,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:53,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109213021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:53,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:53,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109213026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:53,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:53,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109213027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:53,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T13:25:53,064 INFO [Thread-2128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-20T13:25:53,066 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:25:53,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-11-20T13:25:53,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T13:25:53,068 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:25:53,069 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:25:53,069 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:25:53,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:53,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109213130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:53,137 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:53,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109213131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:53,137 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:53,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109213131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:53,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T13:25:53,220 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:53,221 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T13:25:53,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:53,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:53,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:53,221 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:53,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:53,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:53,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:53,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109213332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:53,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:53,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109213338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:53,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:53,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109213339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:53,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T13:25:53,373 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:53,374 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T13:25:53,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:53,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:53,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:53,374 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:53,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:53,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:53,425 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/2405a5dc4a95406599220981b83f47d3 2024-11-20T13:25:53,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/817baf0ba65042648d32a84a32d6caeb is 50, key is test_row_0/C:col10/1732109152999/Put/seqid=0 2024-11-20T13:25:53,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742342_1518 (size=12151) 2024-11-20T13:25:53,450 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/817baf0ba65042648d32a84a32d6caeb 2024-11-20T13:25:53,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/08339bdae526402f9ec3a36a78067a15 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/08339bdae526402f9ec3a36a78067a15 2024-11-20T13:25:53,457 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/08339bdae526402f9ec3a36a78067a15, entries=200, sequenceid=214, filesize=14.2 K 2024-11-20T13:25:53,458 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/2405a5dc4a95406599220981b83f47d3 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/2405a5dc4a95406599220981b83f47d3 2024-11-20T13:25:53,462 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/2405a5dc4a95406599220981b83f47d3, entries=150, sequenceid=214, filesize=11.9 K 2024-11-20T13:25:53,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/817baf0ba65042648d32a84a32d6caeb as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/817baf0ba65042648d32a84a32d6caeb 2024-11-20T13:25:53,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/817baf0ba65042648d32a84a32d6caeb, entries=150, sequenceid=214, filesize=11.9 K 2024-11-20T13:25:53,468 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 24db988c4fa8e1a0b1451e8c68b68697 in 468ms, sequenceid=214, compaction requested=true 2024-11-20T13:25:53,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:53,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:25:53,468 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:53,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:53,468 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:53,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:25:53,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:53,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:25:53,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:53,470 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39219 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:53,470 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/A is initiating minor compaction (all files) 2024-11-20T13:25:53,470 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/A in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
2024-11-20T13:25:53,470 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/1cc9a44def8449a3b1ff04bb3f921700, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/e0c3fc96472c4e058da5421b73ccb779, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/08339bdae526402f9ec3a36a78067a15] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=38.3 K 2024-11-20T13:25:53,471 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:53,471 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/B is initiating minor compaction (all files) 2024-11-20T13:25:53,471 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1cc9a44def8449a3b1ff04bb3f921700, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732109151176 2024-11-20T13:25:53,471 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/B in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
2024-11-20T13:25:53,471 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/b5b9637b29f44e3b98cdf3ec08bff2af, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/19013e59355b49bc90cf16add05a1a27, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/2405a5dc4a95406599220981b83f47d3] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=36.0 K 2024-11-20T13:25:53,471 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting b5b9637b29f44e3b98cdf3ec08bff2af, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732109151176 2024-11-20T13:25:53,471 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0c3fc96472c4e058da5421b73ccb779, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1732109151524 2024-11-20T13:25:53,472 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 19013e59355b49bc90cf16add05a1a27, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1732109151524 2024-11-20T13:25:53,472 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08339bdae526402f9ec3a36a78067a15, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732109152675 2024-11-20T13:25:53,472 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 2405a5dc4a95406599220981b83f47d3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732109152683 2024-11-20T13:25:53,481 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#A#compaction#435 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:53,482 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/a538056270cf453da256a4a712924bd5 is 50, key is test_row_0/A:col10/1732109152999/Put/seqid=0 2024-11-20T13:25:53,483 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#B#compaction#436 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:53,483 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/bf073f2b54194d418cc8e6efaac88f43 is 50, key is test_row_0/B:col10/1732109152999/Put/seqid=0 2024-11-20T13:25:53,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742343_1519 (size=12629) 2024-11-20T13:25:53,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742344_1520 (size=12629) 2024-11-20T13:25:53,495 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/bf073f2b54194d418cc8e6efaac88f43 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/bf073f2b54194d418cc8e6efaac88f43 2024-11-20T13:25:53,501 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/B of 24db988c4fa8e1a0b1451e8c68b68697 into bf073f2b54194d418cc8e6efaac88f43(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:25:53,501 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:53,501 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/B, priority=13, startTime=1732109153468; duration=0sec 2024-11-20T13:25:53,501 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:53,501 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:B 2024-11-20T13:25:53,501 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:53,503 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:53,503 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/C is initiating minor compaction (all files) 2024-11-20T13:25:53,504 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/C in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
2024-11-20T13:25:53,504 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/84d813dee8f8465c90dcb71129140f8c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/2e3bc0e0f1a948fda62731ac8c2eb295, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/817baf0ba65042648d32a84a32d6caeb] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=36.0 K 2024-11-20T13:25:53,504 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 84d813dee8f8465c90dcb71129140f8c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732109151176 2024-11-20T13:25:53,505 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e3bc0e0f1a948fda62731ac8c2eb295, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1732109151524 2024-11-20T13:25:53,505 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 817baf0ba65042648d32a84a32d6caeb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732109152683 2024-11-20T13:25:53,513 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#C#compaction#437 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:53,513 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/6af369bc751149c6b661052b2881ee58 is 50, key is test_row_0/C:col10/1732109152999/Put/seqid=0 2024-11-20T13:25:53,526 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:53,527 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T13:25:53,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
2024-11-20T13:25:53,528 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T13:25:53,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:25:53,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:53,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:25:53,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:53,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:25:53,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:53,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742345_1521 (size=12629) 2024-11-20T13:25:53,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/c4c6940f3dc04d57a15ad763d97bfa5d is 50, key is test_row_0/A:col10/1732109153019/Put/seqid=0 2024-11-20T13:25:53,544 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/6af369bc751149c6b661052b2881ee58 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/6af369bc751149c6b661052b2881ee58 2024-11-20T13:25:53,549 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/C of 24db988c4fa8e1a0b1451e8c68b68697 into 6af369bc751149c6b661052b2881ee58(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
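A pattern that recurs throughout these flush and compaction entries is that new store files are first written under the region's .tmp directory and only then "committed" into the column-family directory (the "Committing .../.tmp/B/... as .../B/..." lines). The sketch below shows that write-then-move idea with plain java.nio.file on a local temporary directory; the file name is borrowed from the log purely for illustration, and HDFS is not involved.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    // Sketch of "write to .tmp, then move into place": readers never observe a
    // partially written file because the final name only appears on the move.
    public class TmpCommitSketch {
        public static void main(String[] args) throws IOException {
            Path storeDir = Files.createTempDirectory("store-B");
            Path tmpDir = Files.createDirectories(storeDir.resolve(".tmp"));

            // 1. Write the new file under .tmp with its final name.
            Path tmpFile = tmpDir.resolve("bf073f2b54194d418cc8e6efaac88f43");
            Files.write(tmpFile, "flushed or compacted cells".getBytes());

            // 2. Commit: move it into the store directory (atomic on the same filesystem).
            Path committed = storeDir.resolve(tmpFile.getFileName());
            Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);

            System.out.println("committed " + committed);
        }
    }

Because the final name only exists once the move succeeds, anything scanning the store directory sees either the old set of files or the new file in full, which is the property the commit step in the log relies on.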
2024-11-20T13:25:53,549 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:53,549 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/C, priority=13, startTime=1732109153468; duration=0sec 2024-11-20T13:25:53,549 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:53,549 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:C 2024-11-20T13:25:53,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742346_1522 (size=9757) 2024-11-20T13:25:53,550 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/c4c6940f3dc04d57a15ad763d97bfa5d 2024-11-20T13:25:53,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/55b8bb98d802419f9be8a885a1f5a4c9 is 50, key is test_row_0/B:col10/1732109153019/Put/seqid=0 2024-11-20T13:25:53,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742347_1523 (size=9757) 2024-11-20T13:25:53,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:53,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:53,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T13:25:53,670 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T13:25:53,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:53,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109213673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:53,686 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:53,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109213683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:53,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:53,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109213683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:53,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:53,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109213784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:53,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:53,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109213788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:53,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:53,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109213788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:53,892 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/a538056270cf453da256a4a712924bd5 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/a538056270cf453da256a4a712924bd5 2024-11-20T13:25:53,896 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/A of 24db988c4fa8e1a0b1451e8c68b68697 into a538056270cf453da256a4a712924bd5(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
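The repeated RegionTooBusyException warnings above are raised by HRegion.checkResources while the region's memstore is over its blocking limit (512 K in this test configuration), and they are what push the AcidGuaranteesTestTool writer threads into the RpcRetryingCallerImpl retry loops logged further down. The plain-Java sketch below shows only the general back-pressure idea (check the in-memory size, request a flush, reject the write so the caller backs off and retries); the exception type, sizes and retry parameters are example values, not HBase code.

    import java.util.concurrent.atomic.AtomicLong;

    // Sketch of memstore back-pressure: writes are rejected while in-memory data
    // exceeds a blocking limit, and the caller retries with exponential backoff.
    public class MemstoreBackpressureSketch {
        static class TooBusyException extends RuntimeException {
            TooBusyException(String msg) { super(msg); }
        }

        static final long BLOCKING_LIMIT = 512 * 1024;          // example: 512 K
        static final AtomicLong memstoreSize = new AtomicLong();

        static void put(byte[] cell) {
            if (memstoreSize.get() > BLOCKING_LIMIT) {
                requestFlush();                                   // ask the flusher to drain the memstore
                throw new TooBusyException("over memstore limit=" + BLOCKING_LIMIT);
            }
            memstoreSize.addAndGet(cell.length);
        }

        static void requestFlush() {
            // A real server would queue an asynchronous flush; here the counter is
            // simply drained so the example terminates.
            memstoreSize.set(0);
        }

        public static void main(String[] args) throws InterruptedException {
            byte[] cell = new byte[64 * 1024];
            long backoffMillis = 10;
            for (int i = 0; i < 20; i++) {
                try {
                    put(cell);
                    backoffMillis = 10;                           // success resets the backoff
                } catch (TooBusyException e) {
                    System.out.println("retrying after " + backoffMillis + " ms: " + e.getMessage());
                    Thread.sleep(backoffMillis);
                    backoffMillis = Math.min(backoffMillis * 2, 1000);
                }
            }
        }
    }

Rejecting the write rather than blocking the RPC handler is what keeps the handler threads free while the flush (pid=143 above) catches up, at the cost of the client-side retries visible in this log.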
2024-11-20T13:25:53,896 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:53,896 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/A, priority=13, startTime=1732109153468; duration=0sec 2024-11-20T13:25:53,896 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:53,896 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:A 2024-11-20T13:25:53,962 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/55b8bb98d802419f9be8a885a1f5a4c9 2024-11-20T13:25:53,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/1ba25dcd0e1849a3b54d9ba0e908dafe is 50, key is test_row_0/C:col10/1732109153019/Put/seqid=0 2024-11-20T13:25:53,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742348_1524 (size=9757) 2024-11-20T13:25:53,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:53,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109213988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:53,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:53,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109213992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:53,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:53,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109213993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:54,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T13:25:54,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:54,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109214195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:54,198 DEBUG [Thread-2120 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:25:54,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:54,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109214200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:54,204 DEBUG [Thread-2124 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4157 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:25:54,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:54,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109214294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:54,298 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:54,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109214295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:54,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:54,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109214301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:54,375 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/1ba25dcd0e1849a3b54d9ba0e908dafe 2024-11-20T13:25:54,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/c4c6940f3dc04d57a15ad763d97bfa5d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/c4c6940f3dc04d57a15ad763d97bfa5d 2024-11-20T13:25:54,386 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/c4c6940f3dc04d57a15ad763d97bfa5d, entries=100, sequenceid=230, filesize=9.5 K 2024-11-20T13:25:54,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/55b8bb98d802419f9be8a885a1f5a4c9 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/55b8bb98d802419f9be8a885a1f5a4c9 2024-11-20T13:25:54,391 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/55b8bb98d802419f9be8a885a1f5a4c9, entries=100, sequenceid=230, filesize=9.5 K 2024-11-20T13:25:54,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/1ba25dcd0e1849a3b54d9ba0e908dafe as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/1ba25dcd0e1849a3b54d9ba0e908dafe 2024-11-20T13:25:54,395 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/1ba25dcd0e1849a3b54d9ba0e908dafe, entries=100, sequenceid=230, filesize=9.5 K 2024-11-20T13:25:54,396 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 24db988c4fa8e1a0b1451e8c68b68697 in 868ms, sequenceid=230, compaction requested=false 2024-11-20T13:25:54,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:54,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:54,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-11-20T13:25:54,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-11-20T13:25:54,405 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-20T13:25:54,405 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3280 sec 2024-11-20T13:25:54,407 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 1.3400 sec 2024-11-20T13:25:54,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:54,804 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T13:25:54,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:25:54,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:54,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:25:54,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:54,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:25:54,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:54,808 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/7e3a11fe32cd4be5ba6d2cb819c252db is 50, key is test_row_0/A:col10/1732109154802/Put/seqid=0 2024-11-20T13:25:54,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742349_1525 (size=14541) 2024-11-20T13:25:54,828 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:54,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109214822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:54,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:54,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109214828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:54,830 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:54,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109214829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:54,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:54,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109214931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:54,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:54,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109214931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:54,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:54,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109214932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:55,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:55,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109215133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:55,138 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:55,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109215134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:55,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:55,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109215139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:55,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T13:25:55,173 INFO [Thread-2128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-20T13:25:55,174 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:25:55,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-11-20T13:25:55,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T13:25:55,176 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:25:55,176 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:25:55,176 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:25:55,215 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/7e3a11fe32cd4be5ba6d2cb819c252db 2024-11-20T13:25:55,221 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/5b11d7746095487bab6947619497ab8f is 50, key is test_row_0/B:col10/1732109154802/Put/seqid=0 2024-11-20T13:25:55,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742350_1526 (size=12151) 2024-11-20T13:25:55,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T13:25:55,328 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:55,329 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-20T13:25:55,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:55,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:55,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:55,329 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:55,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:55,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:55,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:55,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109215439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:55,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:55,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109215440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:55,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:55,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109215443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:55,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T13:25:55,482 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:55,482 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-20T13:25:55,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:55,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:55,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:55,482 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:55,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:55,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:55,626 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/5b11d7746095487bab6947619497ab8f 2024-11-20T13:25:55,635 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:55,635 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-20T13:25:55,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:55,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:55,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:55,636 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:55,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:55,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:25:55,647 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/0b9aadb13d9d427faf3af503b6a5c969 is 50, key is test_row_0/C:col10/1732109154802/Put/seqid=0 2024-11-20T13:25:55,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742351_1527 (size=12151) 2024-11-20T13:25:55,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/0b9aadb13d9d427faf3af503b6a5c969 2024-11-20T13:25:55,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/7e3a11fe32cd4be5ba6d2cb819c252db as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/7e3a11fe32cd4be5ba6d2cb819c252db 2024-11-20T13:25:55,662 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/7e3a11fe32cd4be5ba6d2cb819c252db, entries=200, sequenceid=254, filesize=14.2 K 2024-11-20T13:25:55,663 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/5b11d7746095487bab6947619497ab8f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/5b11d7746095487bab6947619497ab8f 2024-11-20T13:25:55,667 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/5b11d7746095487bab6947619497ab8f, entries=150, sequenceid=254, filesize=11.9 K 2024-11-20T13:25:55,669 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/0b9aadb13d9d427faf3af503b6a5c969 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/0b9aadb13d9d427faf3af503b6a5c969 2024-11-20T13:25:55,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/0b9aadb13d9d427faf3af503b6a5c969, entries=150, sequenceid=254, filesize=11.9 K 2024-11-20T13:25:55,674 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 24db988c4fa8e1a0b1451e8c68b68697 in 871ms, sequenceid=254, 
compaction requested=true 2024-11-20T13:25:55,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:55,674 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:55,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:25:55,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:55,675 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:55,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:25:55,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:55,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:25:55,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:55,677 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36927 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:55,677 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34537 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:55,677 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/A is initiating minor compaction (all files) 2024-11-20T13:25:55,677 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/B is initiating minor compaction (all files) 2024-11-20T13:25:55,677 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/A in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:55,677 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/B in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
2024-11-20T13:25:55,677 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/a538056270cf453da256a4a712924bd5, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/c4c6940f3dc04d57a15ad763d97bfa5d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/7e3a11fe32cd4be5ba6d2cb819c252db] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=36.1 K 2024-11-20T13:25:55,677 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/bf073f2b54194d418cc8e6efaac88f43, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/55b8bb98d802419f9be8a885a1f5a4c9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/5b11d7746095487bab6947619497ab8f] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=33.7 K 2024-11-20T13:25:55,677 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting a538056270cf453da256a4a712924bd5, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732109152683 2024-11-20T13:25:55,677 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting bf073f2b54194d418cc8e6efaac88f43, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732109152683 2024-11-20T13:25:55,678 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4c6940f3dc04d57a15ad763d97bfa5d, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1732109153019 2024-11-20T13:25:55,678 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 55b8bb98d802419f9be8a885a1f5a4c9, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1732109153019 2024-11-20T13:25:55,678 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e3a11fe32cd4be5ba6d2cb819c252db, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732109153653 2024-11-20T13:25:55,679 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b11d7746095487bab6947619497ab8f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732109153653 2024-11-20T13:25:55,686 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#A#compaction#444 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:55,687 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/251cb6810656474da79ea12cd8c13211 is 50, key is test_row_0/A:col10/1732109154802/Put/seqid=0 2024-11-20T13:25:55,706 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#B#compaction#445 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:55,706 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/c95924044848465a9f1b96d63f504afb is 50, key is test_row_0/B:col10/1732109154802/Put/seqid=0 2024-11-20T13:25:55,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742352_1528 (size=12731) 2024-11-20T13:25:55,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742353_1529 (size=12731) 2024-11-20T13:25:55,724 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/251cb6810656474da79ea12cd8c13211 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/251cb6810656474da79ea12cd8c13211 2024-11-20T13:25:55,729 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/c95924044848465a9f1b96d63f504afb as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/c95924044848465a9f1b96d63f504afb 2024-11-20T13:25:55,729 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/A of 24db988c4fa8e1a0b1451e8c68b68697 into 251cb6810656474da79ea12cd8c13211(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:25:55,729 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:55,729 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/A, priority=13, startTime=1732109155674; duration=0sec 2024-11-20T13:25:55,729 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:55,729 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:A 2024-11-20T13:25:55,729 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:55,732 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34537 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:55,732 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/C is initiating minor compaction (all files) 2024-11-20T13:25:55,733 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/C in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:55,733 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/6af369bc751149c6b661052b2881ee58, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/1ba25dcd0e1849a3b54d9ba0e908dafe, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/0b9aadb13d9d427faf3af503b6a5c969] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=33.7 K 2024-11-20T13:25:55,733 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6af369bc751149c6b661052b2881ee58, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732109152683 2024-11-20T13:25:55,734 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ba25dcd0e1849a3b54d9ba0e908dafe, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1732109153019 2024-11-20T13:25:55,735 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b9aadb13d9d427faf3af503b6a5c969, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732109153653 2024-11-20T13:25:55,737 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/B of 24db988c4fa8e1a0b1451e8c68b68697 into c95924044848465a9f1b96d63f504afb(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:25:55,737 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:55,738 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/B, priority=13, startTime=1732109155675; duration=0sec 2024-11-20T13:25:55,738 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:55,738 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:B 2024-11-20T13:25:55,745 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#C#compaction#446 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:55,745 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/5e501b8f8389463eb88875115144fbb3 is 50, key is test_row_0/C:col10/1732109154802/Put/seqid=0 2024-11-20T13:25:55,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742354_1530 (size=12731) 2024-11-20T13:25:55,755 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/5e501b8f8389463eb88875115144fbb3 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/5e501b8f8389463eb88875115144fbb3 2024-11-20T13:25:55,761 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/C of 24db988c4fa8e1a0b1451e8c68b68697 into 5e501b8f8389463eb88875115144fbb3(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
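The selection reported above ("3 store files, 0 compacting, 3 eligible, 16 blocking") is driven by the store-file thresholds of the compaction policy. The sketch below is illustrative only, not part of the captured output; it sets the stock default values for the relevant keys so the knobs behind those numbers are visible, and assumes nothing beyond the HBase client libraries on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Minimum and maximum number of store files considered in one minor compaction.
            conf.setInt("hbase.hstore.compaction.min", 3);
            conf.setInt("hbase.hstore.compaction.max", 10);
            // Above this many store files, writes to the store are blocked until compaction
            // catches up; this is the "16 blocking" figure reported by the policy in the log.
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
            System.out.println(conf.get("hbase.hstore.blockingStoreFiles"));
        }
    }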
2024-11-20T13:25:55,761 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:55,761 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/C, priority=13, startTime=1732109155676; duration=0sec 2024-11-20T13:25:55,762 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:55,762 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:C 2024-11-20T13:25:55,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T13:25:55,788 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:55,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-20T13:25:55,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:55,789 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T13:25:55,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:25:55,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:55,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:25:55,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:55,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:25:55,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:55,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/fb53bbb8a9cf41269e0a23325a2a51cc is 50, key is test_row_0/A:col10/1732109154820/Put/seqid=0 2024-11-20T13:25:55,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742355_1531 (size=12301) 2024-11-20T13:25:55,804 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/fb53bbb8a9cf41269e0a23325a2a51cc 2024-11-20T13:25:55,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/a5c2bcf0b5a94f4290d50777c4472c4d is 50, key is test_row_0/B:col10/1732109154820/Put/seqid=0 2024-11-20T13:25:55,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742356_1532 (size=12301) 2024-11-20T13:25:55,823 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/a5c2bcf0b5a94f4290d50777c4472c4d 2024-11-20T13:25:55,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/dff161644bce4059ae8ef91ca54cbc6b is 50, key is test_row_0/C:col10/1732109154820/Put/seqid=0 2024-11-20T13:25:55,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742357_1533 (size=12301) 2024-11-20T13:25:55,949 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:55,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:55,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:55,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109215977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:55,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:55,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109215980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:55,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:55,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109215982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:56,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:56,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109216085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:56,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:56,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109216087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:56,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:56,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109216088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:56,243 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/dff161644bce4059ae8ef91ca54cbc6b 2024-11-20T13:25:56,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/fb53bbb8a9cf41269e0a23325a2a51cc as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/fb53bbb8a9cf41269e0a23325a2a51cc 2024-11-20T13:25:56,250 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/fb53bbb8a9cf41269e0a23325a2a51cc, entries=150, sequenceid=271, filesize=12.0 K 2024-11-20T13:25:56,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/a5c2bcf0b5a94f4290d50777c4472c4d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/a5c2bcf0b5a94f4290d50777c4472c4d 2024-11-20T13:25:56,254 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/a5c2bcf0b5a94f4290d50777c4472c4d, entries=150, sequenceid=271, filesize=12.0 K 2024-11-20T13:25:56,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/dff161644bce4059ae8ef91ca54cbc6b as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/dff161644bce4059ae8ef91ca54cbc6b 2024-11-20T13:25:56,258 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/dff161644bce4059ae8ef91ca54cbc6b, entries=150, sequenceid=271, filesize=12.0 K 2024-11-20T13:25:56,259 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 24db988c4fa8e1a0b1451e8c68b68697 in 470ms, sequenceid=271, compaction requested=false 2024-11-20T13:25:56,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:56,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
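The pid=144/145 entries record a client-requested flush travelling from HMaster through FlushTableProcedure and FlushRegionProcedure to the region server, which writes one new HFile per store and reports completion back to the master. A minimal sketch of issuing such a request, not part of the captured output, assuming default client configuration and the table name from this log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // The master schedules a FlushTableProcedure with one FlushRegionProcedure per
                // region, which is what produces the pid=144/145 entries seen above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }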
2024-11-20T13:25:56,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-11-20T13:25:56,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-11-20T13:25:56,261 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-20T13:25:56,261 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0840 sec 2024-11-20T13:25:56,263 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 1.0880 sec 2024-11-20T13:25:56,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T13:25:56,279 INFO [Thread-2128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-20T13:25:56,280 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:25:56,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-11-20T13:25:56,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T13:25:56,282 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:25:56,282 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:25:56,282 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:25:56,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:56,292 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T13:25:56,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:25:56,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:56,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:25:56,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-20T13:25:56,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:25:56,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:56,296 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/6acb498247d7428eb57f2a44136ef781 is 50, key is test_row_0/A:col10/1732109156291/Put/seqid=0 2024-11-20T13:25:56,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742358_1534 (size=14741) 2024-11-20T13:25:56,300 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/6acb498247d7428eb57f2a44136ef781 2024-11-20T13:25:56,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/9874e2454a774fc5a48241e5fde35aa9 is 50, key is test_row_0/B:col10/1732109156291/Put/seqid=0 2024-11-20T13:25:56,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742359_1535 (size=12301) 2024-11-20T13:25:56,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:56,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109216315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:56,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:56,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109216316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:56,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:56,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109216320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:56,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T13:25:56,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:56,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109216421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:56,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:56,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109216421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:56,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:56,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109216425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:56,433 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:56,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-20T13:25:56,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:56,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:56,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:56,434 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:56,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:56,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:56,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T13:25:56,586 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:56,586 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-20T13:25:56,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:56,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:56,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:56,587 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
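Throughout this stretch of the log, Mutate calls are rejected with RegionTooBusyException because the region is over its memstore limit while flushes are in flight. The stock client already retries such failures, governed by hbase.client.retries.number and hbase.client.pause; the sketch below adds an explicit backoff loop around Table.put purely for illustration, not part of the captured output, using the row, family, and qualifier names seen in the log and assuming default client configuration:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPut {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
                for (int attempt = 1; attempt <= 5; attempt++) {
                    try {
                        // The client itself also retries internally, governed by
                        // hbase.client.retries.number / hbase.client.pause.
                        table.put(put);
                        break;
                    } catch (IOException e) {
                        // With the region over its memstore limit (as in the warnings above), the
                        // failure typically surfaces as RegionTooBusyException, possibly wrapped in
                        // a retries-exhausted exception; back off and let the flush drain the memstore.
                        Thread.sleep(200L * attempt);
                    }
                }
            }
        }
    }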
2024-11-20T13:25:56,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:56,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:56,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:25:56,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109216624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:56,629 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:25:56,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109216624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:56,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:25:56,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109216630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:56,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/9874e2454a774fc5a48241e5fde35aa9
2024-11-20T13:25:56,715 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/3820e06aca9444fcacb345a916c3d91e is 50, key is test_row_0/C:col10/1732109156291/Put/seqid=0
2024-11-20T13:25:56,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742360_1536 (size=12301)
2024-11-20T13:25:56,738 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:56,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147
2024-11-20T13:25:56,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.
2024-11-20T13:25:56,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing
2024-11-20T13:25:56,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.
2024-11-20T13:25:56,739 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147
java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:56,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147
java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:56,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=147
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:56,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146
2024-11-20T13:25:56,891 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:56,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147
2024-11-20T13:25:56,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.
2024-11-20T13:25:56,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing
2024-11-20T13:25:56,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.
2024-11-20T13:25:56,891 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147
java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:56,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147
java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:56,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=147
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:56,933 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:25:56,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109216930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:56,935 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:25:56,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109216932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:56,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:25:56,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109216937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:57,044 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:57,044 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147
2024-11-20T13:25:57,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.
2024-11-20T13:25:57,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing
2024-11-20T13:25:57,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.
2024-11-20T13:25:57,044 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147
java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:57,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147
java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:57,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=147
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T13:25:57,121 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/3820e06aca9444fcacb345a916c3d91e
2024-11-20T13:25:57,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/6acb498247d7428eb57f2a44136ef781 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/6acb498247d7428eb57f2a44136ef781
2024-11-20T13:25:57,140 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/6acb498247d7428eb57f2a44136ef781, entries=200, sequenceid=294, filesize=14.4 K
2024-11-20T13:25:57,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/9874e2454a774fc5a48241e5fde35aa9 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/9874e2454a774fc5a48241e5fde35aa9
2024-11-20T13:25:57,146 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/9874e2454a774fc5a48241e5fde35aa9, entries=150, sequenceid=294, filesize=12.0 K
2024-11-20T13:25:57,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/3820e06aca9444fcacb345a916c3d91e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/3820e06aca9444fcacb345a916c3d91e
2024-11-20T13:25:57,152 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/3820e06aca9444fcacb345a916c3d91e, entries=150, sequenceid=294, filesize=12.0 K
2024-11-20T13:25:57,154 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 24db988c4fa8e1a0b1451e8c68b68697 in 861ms, sequenceid=294, compaction requested=true
2024-11-20T13:25:57,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697:
2024-11-20T13:25:57,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:A, priority=-2147483648, current under compaction store size is 1
2024-11-20T13:25:57,154 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T13:25:57,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T13:25:57,154 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T13:25:57,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:B, priority=-2147483648, current under compaction store size is 2
2024-11-20T13:25:57,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T13:25:57,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:C, priority=-2147483648, current under compaction store size is 3
2024-11-20T13:25:57,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-20T13:25:57,156 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39773 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T13:25:57,156 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/A is initiating minor compaction (all files)
2024-11-20T13:25:57,156 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/A in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.
2024-11-20T13:25:57,157 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/251cb6810656474da79ea12cd8c13211, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/fb53bbb8a9cf41269e0a23325a2a51cc, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/6acb498247d7428eb57f2a44136ef781] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=38.8 K
2024-11-20T13:25:57,157 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T13:25:57,157 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/B is initiating minor compaction (all files)
2024-11-20T13:25:57,157 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/B in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.
2024-11-20T13:25:57,157 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/c95924044848465a9f1b96d63f504afb, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/a5c2bcf0b5a94f4290d50777c4472c4d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/9874e2454a774fc5a48241e5fde35aa9] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=36.5 K
2024-11-20T13:25:57,157 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 251cb6810656474da79ea12cd8c13211, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732109153653
2024-11-20T13:25:57,158 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting c95924044848465a9f1b96d63f504afb, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732109153653
2024-11-20T13:25:57,158 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb53bbb8a9cf41269e0a23325a2a51cc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1732109154815
2024-11-20T13:25:57,158 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting a5c2bcf0b5a94f4290d50777c4472c4d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1732109154815
2024-11-20T13:25:57,158 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 9874e2454a774fc5a48241e5fde35aa9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732109155971
2024-11-20T13:25:57,159 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6acb498247d7428eb57f2a44136ef781, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732109155971
2024-11-20T13:25:57,172 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#A#compaction#453 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T13:25:57,172 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/2372a5e564ee40c995c2a6b6cd217d44 is 50, key is test_row_0/A:col10/1732109156291/Put/seqid=0
2024-11-20T13:25:57,178 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#B#compaction#454 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T13:25:57,179 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/d4b6ea62f7bd4cf79777f7a184bf5289 is 50, key is test_row_0/B:col10/1732109156291/Put/seqid=0
2024-11-20T13:25:57,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742361_1537 (size=12983)
2024-11-20T13:25:57,196 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:57,196 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147
2024-11-20T13:25:57,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.
2024-11-20T13:25:57,197 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB
2024-11-20T13:25:57,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A
2024-11-20T13:25:57,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T13:25:57,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B
2024-11-20T13:25:57,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T13:25:57,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C
2024-11-20T13:25:57,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T13:25:57,198 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/2372a5e564ee40c995c2a6b6cd217d44 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2372a5e564ee40c995c2a6b6cd217d44
2024-11-20T13:25:57,203 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/A of 24db988c4fa8e1a0b1451e8c68b68697 into 2372a5e564ee40c995c2a6b6cd217d44(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T13:25:57,203 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697:
2024-11-20T13:25:57,203 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/A, priority=13, startTime=1732109157154; duration=0sec
2024-11-20T13:25:57,203 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-20T13:25:57,203 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:A
2024-11-20T13:25:57,203 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T13:25:57,205 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T13:25:57,205 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/C is initiating minor compaction (all files)
2024-11-20T13:25:57,205 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/C in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.
2024-11-20T13:25:57,205 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/5e501b8f8389463eb88875115144fbb3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/dff161644bce4059ae8ef91ca54cbc6b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/3820e06aca9444fcacb345a916c3d91e] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=36.5 K
2024-11-20T13:25:57,205 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e501b8f8389463eb88875115144fbb3, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732109153653
2024-11-20T13:25:57,206 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting dff161644bce4059ae8ef91ca54cbc6b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1732109154815
2024-11-20T13:25:57,207 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3820e06aca9444fcacb345a916c3d91e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732109155971
2024-11-20T13:25:57,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742362_1538 (size=12983)
2024-11-20T13:25:57,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/b945c28075064b748d919ac0e4480a3b is 50, key is test_row_0/A:col10/1732109156318/Put/seqid=0
2024-11-20T13:25:57,233 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#C#compaction#456 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T13:25:57,235 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/0a3e68140293407c85478108332453a4 is 50, key is test_row_0/C:col10/1732109156291/Put/seqid=0
2024-11-20T13:25:57,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742364_1540 (size=12983)
2024-11-20T13:25:57,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742363_1539 (size=12301)
2024-11-20T13:25:57,252 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/0a3e68140293407c85478108332453a4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/0a3e68140293407c85478108332453a4
2024-11-20T13:25:57,256 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/C of 24db988c4fa8e1a0b1451e8c68b68697 into 0a3e68140293407c85478108332453a4(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T13:25:57,256 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697:
2024-11-20T13:25:57,256 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/C, priority=13, startTime=1732109157154; duration=0sec
2024-11-20T13:25:57,256 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T13:25:57,256 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:C
2024-11-20T13:25:57,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146
2024-11-20T13:25:57,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697
2024-11-20T13:25:57,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing
2024-11-20T13:25:57,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:25:57,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:25:57,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109217508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:57,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109217508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:25:57,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:57,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109217509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:57,623 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/d4b6ea62f7bd4cf79777f7a184bf5289 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/d4b6ea62f7bd4cf79777f7a184bf5289 2024-11-20T13:25:57,624 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:57,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109217618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:57,624 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:57,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109217618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:57,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:57,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109217619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:57,628 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/B of 24db988c4fa8e1a0b1451e8c68b68697 into d4b6ea62f7bd4cf79777f7a184bf5289(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:25:57,628 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:57,628 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/B, priority=13, startTime=1732109157154; duration=0sec 2024-11-20T13:25:57,628 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:57,628 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:B 2024-11-20T13:25:57,648 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=307 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/b945c28075064b748d919ac0e4480a3b 2024-11-20T13:25:57,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/f2c85e6e067445589fac057d47c69e06 is 50, key is test_row_0/B:col10/1732109156318/Put/seqid=0 2024-11-20T13:25:57,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742365_1541 (size=12301) 2024-11-20T13:25:57,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:57,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109217825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:57,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:57,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109217825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:57,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:57,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109217826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,071 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=307 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/f2c85e6e067445589fac057d47c69e06 2024-11-20T13:25:58,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/6de00098c3024959a37c844fd3e1911f is 50, key is test_row_0/C:col10/1732109156318/Put/seqid=0 2024-11-20T13:25:58,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742366_1542 (size=12301) 2024-11-20T13:25:58,099 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=307 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/6de00098c3024959a37c844fd3e1911f 2024-11-20T13:25:58,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/b945c28075064b748d919ac0e4480a3b as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/b945c28075064b748d919ac0e4480a3b 2024-11-20T13:25:58,114 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/b945c28075064b748d919ac0e4480a3b, entries=150, sequenceid=307, filesize=12.0 K 2024-11-20T13:25:58,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/f2c85e6e067445589fac057d47c69e06 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/f2c85e6e067445589fac057d47c69e06 2024-11-20T13:25:58,119 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/f2c85e6e067445589fac057d47c69e06, entries=150, sequenceid=307, filesize=12.0 K 2024-11-20T13:25:58,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/6de00098c3024959a37c844fd3e1911f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/6de00098c3024959a37c844fd3e1911f 2024-11-20T13:25:58,125 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/6de00098c3024959a37c844fd3e1911f, entries=150, sequenceid=307, filesize=12.0 K 2024-11-20T13:25:58,125 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 24db988c4fa8e1a0b1451e8c68b68697 in 928ms, sequenceid=307, compaction requested=false 2024-11-20T13:25:58,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:58,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
2024-11-20T13:25:58,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-11-20T13:25:58,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-11-20T13:25:58,128 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-20T13:25:58,128 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8450 sec 2024-11-20T13:25:58,130 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 1.8490 sec 2024-11-20T13:25:58,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:58,134 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T13:25:58,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:25:58,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:58,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:25:58,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:58,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:25:58,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:58,142 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/2de774e9ea6e4b868f4e87fe0fe91bd4 is 50, key is test_row_0/A:col10/1732109158133/Put/seqid=0 2024-11-20T13:25:58,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742367_1543 (size=17181) 2024-11-20T13:25:58,150 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/2de774e9ea6e4b868f4e87fe0fe91bd4 2024-11-20T13:25:58,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109218148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,157 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/c36fefc164554a6d99a6021300952d38 is 50, key is test_row_0/B:col10/1732109158133/Put/seqid=0 2024-11-20T13:25:58,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109218152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109218154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742368_1544 (size=12301) 2024-11-20T13:25:58,161 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/c36fefc164554a6d99a6021300952d38 2024-11-20T13:25:58,169 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/3fd25629c25b4c30897d2c6906e8b375 is 50, key is test_row_0/C:col10/1732109158133/Put/seqid=0 2024-11-20T13:25:58,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742369_1545 (size=12301) 2024-11-20T13:25:58,173 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/3fd25629c25b4c30897d2c6906e8b375 2024-11-20T13:25:58,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/2de774e9ea6e4b868f4e87fe0fe91bd4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2de774e9ea6e4b868f4e87fe0fe91bd4 2024-11-20T13:25:58,181 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2de774e9ea6e4b868f4e87fe0fe91bd4, entries=250, sequenceid=335, filesize=16.8 K 2024-11-20T13:25:58,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/c36fefc164554a6d99a6021300952d38 as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/c36fefc164554a6d99a6021300952d38 2024-11-20T13:25:58,186 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/c36fefc164554a6d99a6021300952d38, entries=150, sequenceid=335, filesize=12.0 K 2024-11-20T13:25:58,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/3fd25629c25b4c30897d2c6906e8b375 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/3fd25629c25b4c30897d2c6906e8b375 2024-11-20T13:25:58,191 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/3fd25629c25b4c30897d2c6906e8b375, entries=150, sequenceid=335, filesize=12.0 K 2024-11-20T13:25:58,192 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 24db988c4fa8e1a0b1451e8c68b68697 in 58ms, sequenceid=335, compaction requested=true 2024-11-20T13:25:58,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:58,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:25:58,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:58,192 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:58,192 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:58,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:25:58,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:58,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:25:58,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:58,193 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:58,193 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42465 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:58,193 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/B is initiating minor compaction (all files) 2024-11-20T13:25:58,193 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/A is initiating minor compaction (all files) 2024-11-20T13:25:58,193 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/B in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:58,193 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/A in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:58,194 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/d4b6ea62f7bd4cf79777f7a184bf5289, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/f2c85e6e067445589fac057d47c69e06, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/c36fefc164554a6d99a6021300952d38] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=36.7 K 2024-11-20T13:25:58,194 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2372a5e564ee40c995c2a6b6cd217d44, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/b945c28075064b748d919ac0e4480a3b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2de774e9ea6e4b868f4e87fe0fe91bd4] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=41.5 K 2024-11-20T13:25:58,194 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2372a5e564ee40c995c2a6b6cd217d44, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732109155971 2024-11-20T13:25:58,194 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting d4b6ea62f7bd4cf79777f7a184bf5289, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732109155971 2024-11-20T13:25:58,194 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting b945c28075064b748d919ac0e4480a3b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=307, earliestPutTs=1732109156313 2024-11-20T13:25:58,195 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting f2c85e6e067445589fac057d47c69e06, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=307, earliestPutTs=1732109156313 2024-11-20T13:25:58,195 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2de774e9ea6e4b868f4e87fe0fe91bd4, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732109157506 2024-11-20T13:25:58,195 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting c36fefc164554a6d99a6021300952d38, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732109157506 2024-11-20T13:25:58,203 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#A#compaction#462 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:58,204 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/63c352f9ef4843d4ac2d19357f3e5128 is 50, key is test_row_0/A:col10/1732109158133/Put/seqid=0 2024-11-20T13:25:58,206 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#B#compaction#463 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:58,206 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/d45f27ba47614c44beca21ba410dcd39 is 50, key is test_row_0/B:col10/1732109158133/Put/seqid=0 2024-11-20T13:25:58,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742370_1546 (size=13085) 2024-11-20T13:25:58,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:58,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T13:25:58,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:25:58,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:58,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:25:58,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:58,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:25:58,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:58,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/7d4f72ddd2be42cab5d39e0004b72355 is 50, key is test_row_1/A:col10/1732109158221/Put/seqid=0 2024-11-20T13:25:58,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742371_1547 (size=13085) 2024-11-20T13:25:58,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742372_1548 (size=12297) 2024-11-20T13:25:58,243 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/7d4f72ddd2be42cab5d39e0004b72355 2024-11-20T13:25:58,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/a790ccff67d84c88994d58ea19d9fdfa is 50, key is test_row_1/B:col10/1732109158221/Put/seqid=0 2024-11-20T13:25:58,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742373_1549 (size=9857) 2024-11-20T13:25:58,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109218273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109218274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109218275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109218275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109218276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,380 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109218378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109218382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T13:25:58,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109218382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,386 INFO [Thread-2128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-11-20T13:25:58,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109218383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109218384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,387 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:25:58,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees 2024-11-20T13:25:58,389 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:25:58,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T13:25:58,389 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:25:58,389 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:25:58,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T13:25:58,541 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-20T13:25:58,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:58,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
as already flushing 2024-11-20T13:25:58,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:58,542 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:58,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:58,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:58,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109218581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109218587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109218587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109218589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109218589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,614 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/63c352f9ef4843d4ac2d19357f3e5128 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/63c352f9ef4843d4ac2d19357f3e5128 2024-11-20T13:25:58,619 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/A of 24db988c4fa8e1a0b1451e8c68b68697 into 63c352f9ef4843d4ac2d19357f3e5128(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
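The RegionTooBusyException entries above show the region server refusing Mutate calls because the region's memstore has grown past its blocking limit (reported here as 512.0 K, evidently a deliberately small setting for this test) while flushes and compactions catch up; callers are expected to back off and retry. Below is a minimal client-side sketch of that retry pattern, not code from the test itself: the table name matches the log, but the row/column values, retry count, and backoff are illustrative assumptions, and in practice the HBase client already retries RegionTooBusyException internally before it ever reaches application code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Illustrative family/qualifier/value; the test uses families A, B and C.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                       // illustrative starting backoff
      int maxAttempts = 5;                        // illustrative retry budget
      for (int attempt = 1; attempt <= maxAttempts; attempt++) {
        try {
          table.put(put);                         // rejected while the memstore is over its blocking limit
          break;                                  // write accepted
        } catch (IOException e) {
          // RegionTooBusyException is an IOException; the client retries it internally, so if it
          // still surfaces here the region is persistently over its limit. Back off and try again.
          if (attempt == maxAttempts) {
            throw e;
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}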
2024-11-20T13:25:58,619 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:58,619 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/A, priority=13, startTime=1732109158192; duration=0sec 2024-11-20T13:25:58,619 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:25:58,619 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:A 2024-11-20T13:25:58,619 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:25:58,620 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:25:58,620 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/C is initiating minor compaction (all files) 2024-11-20T13:25:58,620 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/C in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:58,621 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/0a3e68140293407c85478108332453a4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/6de00098c3024959a37c844fd3e1911f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/3fd25629c25b4c30897d2c6906e8b375] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=36.7 K 2024-11-20T13:25:58,621 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a3e68140293407c85478108332453a4, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732109155971 2024-11-20T13:25:58,621 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6de00098c3024959a37c844fd3e1911f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=307, earliestPutTs=1732109156313 2024-11-20T13:25:58,621 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3fd25629c25b4c30897d2c6906e8b375, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732109157506 2024-11-20T13:25:58,630 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#C#compaction#466 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:25:58,630 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/ee0927e1f8a342e4a0aafa731d180fc9 is 50, key is test_row_0/C:col10/1732109158133/Put/seqid=0 2024-11-20T13:25:58,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742374_1550 (size=13085) 2024-11-20T13:25:58,644 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/d45f27ba47614c44beca21ba410dcd39 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/d45f27ba47614c44beca21ba410dcd39 2024-11-20T13:25:58,647 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/B of 24db988c4fa8e1a0b1451e8c68b68697 into d45f27ba47614c44beca21ba410dcd39(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:25:58,648 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:58,648 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/B, priority=13, startTime=1732109158192; duration=0sec 2024-11-20T13:25:58,648 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:58,648 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:B 2024-11-20T13:25:58,653 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/a790ccff67d84c88994d58ea19d9fdfa 2024-11-20T13:25:58,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/c36a79ddbe1a4618b1fdb04b8157d9bf is 50, key is test_row_1/C:col10/1732109158221/Put/seqid=0 2024-11-20T13:25:58,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742375_1551 (size=9857) 2024-11-20T13:25:58,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T13:25:58,694 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,695 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-20T13:25:58,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:58,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:58,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:58,695 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:58,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:58,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:58,847 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,848 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-20T13:25:58,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:58,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
as already flushing 2024-11-20T13:25:58,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:58,848 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:58,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:58,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:58,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109218887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109218892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109218892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109218893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,899 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:58,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109218895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:58,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T13:25:59,000 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-20T13:25:59,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:59,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:59,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:59,000 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:59,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:59,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:25:59,038 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/ee0927e1f8a342e4a0aafa731d180fc9 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/ee0927e1f8a342e4a0aafa731d180fc9 2024-11-20T13:25:59,044 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/C of 24db988c4fa8e1a0b1451e8c68b68697 into ee0927e1f8a342e4a0aafa731d180fc9(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
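The pid=148/149 entries show the admin-requested FLUSH of TestAcidGuarantees being driven by a FlushTableProcedure on the master while the region server keeps answering "NOT flushing ... as already flushing"; the master simply re-dispatches the per-region flush callable until the in-progress flush completes. From a client's point of view that whole exchange is one blocking Admin.flush() call, as in the minimal sketch below; the connection setup and timing output are illustrative assumptions, not taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      long start = System.currentTimeMillis();
      // Synchronous table flush: the master runs a FlushTableProcedure (as in the log above) and
      // keeps retrying the per-region flush until the region is no longer "already flushing".
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
      System.out.println("flush completed in " + (System.currentTimeMillis() - start) + " ms");
    }
  }
}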
2024-11-20T13:25:59,044 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:59,044 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/C, priority=13, startTime=1732109158193; duration=0sec 2024-11-20T13:25:59,044 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:25:59,045 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:C 2024-11-20T13:25:59,068 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/c36a79ddbe1a4618b1fdb04b8157d9bf 2024-11-20T13:25:59,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/7d4f72ddd2be42cab5d39e0004b72355 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/7d4f72ddd2be42cab5d39e0004b72355 2024-11-20T13:25:59,076 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/7d4f72ddd2be42cab5d39e0004b72355, entries=150, sequenceid=347, filesize=12.0 K 2024-11-20T13:25:59,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/a790ccff67d84c88994d58ea19d9fdfa as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/a790ccff67d84c88994d58ea19d9fdfa 2024-11-20T13:25:59,081 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/a790ccff67d84c88994d58ea19d9fdfa, entries=100, sequenceid=347, filesize=9.6 K 2024-11-20T13:25:59,082 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/c36a79ddbe1a4618b1fdb04b8157d9bf as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/c36a79ddbe1a4618b1fdb04b8157d9bf 2024-11-20T13:25:59,085 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/c36a79ddbe1a4618b1fdb04b8157d9bf, entries=100, sequenceid=347, filesize=9.6 K 2024-11-20T13:25:59,086 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 24db988c4fa8e1a0b1451e8c68b68697 in 864ms, sequenceid=347, compaction requested=false 2024-11-20T13:25:59,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:59,152 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,153 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-20T13:25:59,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:25:59,153 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T13:25:59,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:25:59,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:59,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:25:59,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:59,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:25:59,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:59,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/b3863c0d3dde426c81edc2c8e2f942f1 is 50, key is test_row_0/A:col10/1732109158275/Put/seqid=0 2024-11-20T13:25:59,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742376_1552 (size=12301) 2024-11-20T13:25:59,187 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/b3863c0d3dde426c81edc2c8e2f942f1 2024-11-20T13:25:59,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/80f441647ef64911b8c8a9da5712b30d is 50, key is test_row_0/B:col10/1732109158275/Put/seqid=0 2024-11-20T13:25:59,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742377_1553 (size=12301) 2024-11-20T13:25:59,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:25:59,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:59,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:59,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109219405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:59,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109219406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:59,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109219407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:59,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109219410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:59,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109219411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T13:25:59,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:59,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109219512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:59,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109219512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:59,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109219512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:59,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109219516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:59,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109219517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,602 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/80f441647ef64911b8c8a9da5712b30d 2024-11-20T13:25:59,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/4e3b57003a194f8bb589bdb8bbcaba48 is 50, key is test_row_0/C:col10/1732109158275/Put/seqid=0 2024-11-20T13:25:59,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742378_1554 (size=12301) 2024-11-20T13:25:59,614 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/4e3b57003a194f8bb589bdb8bbcaba48 2024-11-20T13:25:59,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/b3863c0d3dde426c81edc2c8e2f942f1 as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/b3863c0d3dde426c81edc2c8e2f942f1 2024-11-20T13:25:59,624 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/b3863c0d3dde426c81edc2c8e2f942f1, entries=150, sequenceid=374, filesize=12.0 K 2024-11-20T13:25:59,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/80f441647ef64911b8c8a9da5712b30d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/80f441647ef64911b8c8a9da5712b30d 2024-11-20T13:25:59,628 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/80f441647ef64911b8c8a9da5712b30d, entries=150, sequenceid=374, filesize=12.0 K 2024-11-20T13:25:59,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/4e3b57003a194f8bb589bdb8bbcaba48 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/4e3b57003a194f8bb589bdb8bbcaba48 2024-11-20T13:25:59,634 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/4e3b57003a194f8bb589bdb8bbcaba48, entries=150, sequenceid=374, filesize=12.0 K 2024-11-20T13:25:59,635 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 24db988c4fa8e1a0b1451e8c68b68697 in 482ms, sequenceid=374, compaction requested=true 2024-11-20T13:25:59,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:25:59,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
2024-11-20T13:25:59,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-11-20T13:25:59,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=149 2024-11-20T13:25:59,639 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-11-20T13:25:59,639 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2480 sec 2024-11-20T13:25:59,641 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees in 1.2530 sec 2024-11-20T13:25:59,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:25:59,719 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T13:25:59,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:25:59,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:59,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:25:59,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:59,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:25:59,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:25:59,724 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/a572f769238448e7820c8df3e7f4b69b is 50, key is test_row_0/A:col10/1732109159406/Put/seqid=0 2024-11-20T13:25:59,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742379_1555 (size=14741) 2024-11-20T13:25:59,732 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/a572f769238448e7820c8df3e7f4b69b 2024-11-20T13:25:59,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/9943adf9310a4e20977fda70b5ca4064 is 50, key is test_row_0/B:col10/1732109159406/Put/seqid=0 2024-11-20T13:25:59,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to 
blk_1073742380_1556 (size=12301) 2024-11-20T13:25:59,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109219780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109219780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109219781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:59,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109219782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:59,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109219785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,894 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:59,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109219890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:59,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109219890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:59,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109219890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:59,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109219891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:25:59,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:25:59,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109219892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:00,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:00,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109220097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:00,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:00,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109220097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:00,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:00,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109220097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:00,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:00,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109220097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:00,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:00,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109220097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:00,143 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/9943adf9310a4e20977fda70b5ca4064 2024-11-20T13:26:00,150 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/0cd885729def44599be60f9fd70cdf0b is 50, key is test_row_0/C:col10/1732109159406/Put/seqid=0 2024-11-20T13:26:00,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742381_1557 (size=12301) 2024-11-20T13:26:00,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:00,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109220403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:00,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:00,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109220403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:00,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:00,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109220403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:00,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:00,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109220403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:00,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:26:00,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109220405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:26:00,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148
2024-11-20T13:26:00,493 INFO [Thread-2128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 148 completed
2024-11-20T13:26:00,494 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-20T13:26:00,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees
2024-11-20T13:26:00,495 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-20T13:26:00,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150
2024-11-20T13:26:00,496 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T13:26:00,496 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T13:26:00,555 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/0cd885729def44599be60f9fd70cdf0b
2024-11-20T13:26:00,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/a572f769238448e7820c8df3e7f4b69b as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/a572f769238448e7820c8df3e7f4b69b
2024-11-20T13:26:00,564 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/a572f769238448e7820c8df3e7f4b69b, entries=200, sequenceid=387, filesize=14.4 K
2024-11-20T13:26:00,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/9943adf9310a4e20977fda70b5ca4064 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/9943adf9310a4e20977fda70b5ca4064
2024-11-20T13:26:00,568 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/9943adf9310a4e20977fda70b5ca4064, entries=150, sequenceid=387, filesize=12.0 K
2024-11-20T13:26:00,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/0cd885729def44599be60f9fd70cdf0b as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/0cd885729def44599be60f9fd70cdf0b
2024-11-20T13:26:00,573 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/0cd885729def44599be60f9fd70cdf0b, entries=150, sequenceid=387, filesize=12.0 K
2024-11-20T13:26:00,573 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 24db988c4fa8e1a0b1451e8c68b68697 in 854ms, sequenceid=387, compaction requested=true
2024-11-20T13:26:00,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697:
2024-11-20T13:26:00,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:A, priority=-2147483648, current under compaction store size is 1
2024-11-20T13:26:00,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T13:26:00,574 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-11-20T13:26:00,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:B, priority=-2147483648, current under compaction store size is 2
2024-11-20T13:26:00,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T13:26:00,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:C, priority=-2147483648, current under compaction store size is 3
2024-11-20T13:26:00,574 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-11-20T13:26:00,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-20T13:26:00,575 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52424 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-11-20T13:26:00,575 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47544 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-11-20T13:26:00,575 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/A is initiating minor compaction (all files)
2024-11-20T13:26:00,575 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/B is initiating minor compaction (all files)
2024-11-20T13:26:00,575 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/A in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.
2024-11-20T13:26:00,575 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/B in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.
2024-11-20T13:26:00,575 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/63c352f9ef4843d4ac2d19357f3e5128, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/7d4f72ddd2be42cab5d39e0004b72355, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/b3863c0d3dde426c81edc2c8e2f942f1, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/a572f769238448e7820c8df3e7f4b69b] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=51.2 K
2024-11-20T13:26:00,575 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/d45f27ba47614c44beca21ba410dcd39, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/a790ccff67d84c88994d58ea19d9fdfa, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/80f441647ef64911b8c8a9da5712b30d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/9943adf9310a4e20977fda70b5ca4064] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=46.4 K
2024-11-20T13:26:00,575 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63c352f9ef4843d4ac2d19357f3e5128, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732109157506
2024-11-20T13:26:00,575 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting d45f27ba47614c44beca21ba410dcd39, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732109157506
2024-11-20T13:26:00,576 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d4f72ddd2be42cab5d39e0004b72355, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1732109158147
2024-11-20T13:26:00,576 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting a790ccff67d84c88994d58ea19d9fdfa, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1732109158147
2024-11-20T13:26:00,576 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 80f441647ef64911b8c8a9da5712b30d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732109158265
2024-11-20T13:26:00,576 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting b3863c0d3dde426c81edc2c8e2f942f1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732109158265
2024-11-20T13:26:00,576 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting a572f769238448e7820c8df3e7f4b69b, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1732109159406
2024-11-20T13:26:00,576 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 9943adf9310a4e20977fda70b5ca4064, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1732109159406
2024-11-20T13:26:00,583 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#B#compaction#474 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T13:26:00,584 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/a27284d3e01344908074e09fcee3f71c is 50, key is test_row_0/B:col10/1732109159406/Put/seqid=0
2024-11-20T13:26:00,586 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#A#compaction#475 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T13:26:00,587 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/ce3c50d7a01c41e0a14a4a2e9076a7d3 is 50, key is test_row_0/A:col10/1732109159406/Put/seqid=0
2024-11-20T13:26:00,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742382_1558 (size=13221)
2024-11-20T13:26:00,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742383_1559 (size=13221)
2024-11-20T13:26:00,593 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/a27284d3e01344908074e09fcee3f71c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/a27284d3e01344908074e09fcee3f71c
2024-11-20T13:26:00,595 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/ce3c50d7a01c41e0a14a4a2e9076a7d3 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/ce3c50d7a01c41e0a14a4a2e9076a7d3
2024-11-20T13:26:00,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150
2024-11-20T13:26:00,597 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/B of 24db988c4fa8e1a0b1451e8c68b68697 into a27284d3e01344908074e09fcee3f71c(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T13:26:00,597 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697:
2024-11-20T13:26:00,597 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/B, priority=12, startTime=1732109160574; duration=0sec
2024-11-20T13:26:00,598 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-20T13:26:00,598 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:B
2024-11-20T13:26:00,598 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-11-20T13:26:00,600 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47544 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-11-20T13:26:00,600 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/C is initiating minor compaction (all files)
2024-11-20T13:26:00,600 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/C in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.
2024-11-20T13:26:00,600 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/ee0927e1f8a342e4a0aafa731d180fc9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/c36a79ddbe1a4618b1fdb04b8157d9bf, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/4e3b57003a194f8bb589bdb8bbcaba48, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/0cd885729def44599be60f9fd70cdf0b] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=46.4 K
2024-11-20T13:26:00,601 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting ee0927e1f8a342e4a0aafa731d180fc9, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732109157506
2024-11-20T13:26:00,601 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/A of 24db988c4fa8e1a0b1451e8c68b68697 into ce3c50d7a01c41e0a14a4a2e9076a7d3(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T13:26:00,601 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697:
2024-11-20T13:26:00,601 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/A, priority=12, startTime=1732109160573; duration=0sec
2024-11-20T13:26:00,601 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T13:26:00,601 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:A
2024-11-20T13:26:00,601 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting c36a79ddbe1a4618b1fdb04b8157d9bf, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1732109158147
2024-11-20T13:26:00,601 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e3b57003a194f8bb589bdb8bbcaba48, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732109158265
2024-11-20T13:26:00,602 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 0cd885729def44599be60f9fd70cdf0b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1732109159406
2024-11-20T13:26:00,610 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#C#compaction#476 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T13:26:00,610 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/43215b4e9d1a4343b1ec318c3a004583 is 50, key is test_row_0/C:col10/1732109159406/Put/seqid=0
2024-11-20T13:26:00,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742384_1560 (size=13221)
2024-11-20T13:26:00,626 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/43215b4e9d1a4343b1ec318c3a004583 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/43215b4e9d1a4343b1ec318c3a004583
2024-11-20T13:26:00,631 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/C of 24db988c4fa8e1a0b1451e8c68b68697 into 43215b4e9d1a4343b1ec318c3a004583(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T13:26:00,631 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697:
2024-11-20T13:26:00,631 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/C, priority=12, startTime=1732109160574; duration=0sec
2024-11-20T13:26:00,631 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T13:26:00,631 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:C
2024-11-20T13:26:00,647 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137
2024-11-20T13:26:00,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151
2024-11-20T13:26:00,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.
2024-11-20T13:26:00,648 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB
2024-11-20T13:26:00,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A
2024-11-20T13:26:00,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T13:26:00,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B
2024-11-20T13:26:00,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T13:26:00,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C
2024-11-20T13:26:00,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T13:26:00,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/93762813718a458a881ab3e82f876f4c is 50, key is test_row_0/A:col10/1732109159780/Put/seqid=0
2024-11-20T13:26:00,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742385_1561 (size=12301)
2024-11-20T13:26:00,658 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/93762813718a458a881ab3e82f876f4c
2024-11-20T13:26:00,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/0a5e167da42347e6802c98e9a6524096 is 50, key is test_row_0/B:col10/1732109159780/Put/seqid=0
2024-11-20T13:26:00,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742386_1562 (size=12301)
2024-11-20T13:26:00,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150
2024-11-20T13:26:00,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697
2024-11-20T13:26:00,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing
2024-11-20T13:26:00,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:26:00,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109220919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:26:00,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:00,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109220920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:00,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:00,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109220921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:00,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:00,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109220922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:00,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:00,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109220922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,027 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109221024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109221024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109221026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109221031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109221031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,078 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/0a5e167da42347e6802c98e9a6524096 2024-11-20T13:26:01,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/c573fd03521e4381bfe6c2462bd2181e is 50, key is test_row_0/C:col10/1732109159780/Put/seqid=0 2024-11-20T13:26:01,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742387_1563 (size=12301) 2024-11-20T13:26:01,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-20T13:26:01,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109221229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,231 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109221229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109221232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109221235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109221235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,494 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/c573fd03521e4381bfe6c2462bd2181e 2024-11-20T13:26:01,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/93762813718a458a881ab3e82f876f4c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/93762813718a458a881ab3e82f876f4c 2024-11-20T13:26:01,504 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/93762813718a458a881ab3e82f876f4c, entries=150, sequenceid=413, filesize=12.0 K 2024-11-20T13:26:01,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/0a5e167da42347e6802c98e9a6524096 as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/0a5e167da42347e6802c98e9a6524096 2024-11-20T13:26:01,509 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/0a5e167da42347e6802c98e9a6524096, entries=150, sequenceid=413, filesize=12.0 K 2024-11-20T13:26:01,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/c573fd03521e4381bfe6c2462bd2181e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/c573fd03521e4381bfe6c2462bd2181e 2024-11-20T13:26:01,513 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/c573fd03521e4381bfe6c2462bd2181e, entries=150, sequenceid=413, filesize=12.0 K 2024-11-20T13:26:01,514 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 24db988c4fa8e1a0b1451e8c68b68697 in 866ms, sequenceid=413, compaction requested=false 2024-11-20T13:26:01,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:26:01,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
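Note on the entries above: the RPC handlers reject Mutate calls with RegionTooBusyException while the region's memstore is over its blocking limit (reported here as 512.0 K), and writes can proceed again once the flush for pid=151 commits the A/B/C store files. That blocking limit is the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier, enforced in HRegion.checkResources. The sketch below only illustrates that relationship; the 128 K / 4 values are assumptions for illustration, not the values this test actually configures.

// Illustrative only: how a small blocking memstore limit like the 512.0 K above can arise.
// The specific numbers are assumptions; the flush-size x block-multiplier relationship is
// what HRegion.checkResources enforces in the log entries above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // hypothetical 128 K flush threshold
        conf.setLong("hbase.hregion.memstore.block.multiplier", 4);     // 128 K x 4 = 512 K blocking limit
        long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getLong("hbase.hregion.memstore.block.multiplier", 0);
        // Once a region's memstore exceeds this many bytes, puts are rejected with
        // RegionTooBusyException until a flush drains the memstore.
        System.out.println("Blocking memstore size: " + blocking + " bytes");
    }
}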
2024-11-20T13:26:01,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=151 2024-11-20T13:26:01,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=151 2024-11-20T13:26:01,518 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-20T13:26:01,518 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0210 sec 2024-11-20T13:26:01,520 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees in 1.0250 sec 2024-11-20T13:26:01,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:26:01,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T13:26:01,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:26:01,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:01,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:26:01,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:01,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:26:01,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:01,540 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/20033fa1608d4fe7999bba38b8856b5c is 50, key is test_row_0/A:col10/1732109160921/Put/seqid=0 2024-11-20T13:26:01,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742388_1564 (size=14741) 2024-11-20T13:26:01,558 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=428 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/20033fa1608d4fe7999bba38b8856b5c 2024-11-20T13:26:01,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/7929b418689c491ebb661e1885265537 is 50, key is test_row_0/B:col10/1732109160921/Put/seqid=0 2024-11-20T13:26:01,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109221561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109221562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742389_1565 (size=12301) 2024-11-20T13:26:01,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109221568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109221569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109221569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-20T13:26:01,599 INFO [Thread-2128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-11-20T13:26:01,600 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:26:01,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees 2024-11-20T13:26:01,602 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=152, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:26:01,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-20T13:26:01,603 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=152, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:26:01,603 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:26:01,675 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109221670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,675 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109221671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109221680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109221681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109221681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-20T13:26:01,754 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,755 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-11-20T13:26:01,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:26:01,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:26:01,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:26:01,755 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:01,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:01,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:01,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109221877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109221877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109221886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109221886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:01,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109221886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-20T13:26:01,907 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:01,908 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-11-20T13:26:01,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:26:01,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:26:01,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:26:01,909 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:01,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:01,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:01,972 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=428 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/7929b418689c491ebb661e1885265537 2024-11-20T13:26:01,982 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/ade65092f23d4140bcf0b74e3669a658 is 50, key is test_row_0/C:col10/1732109160921/Put/seqid=0 2024-11-20T13:26:01,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742390_1566 (size=12301) 2024-11-20T13:26:02,061 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:02,061 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-11-20T13:26:02,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:26:02,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
as already flushing 2024-11-20T13:26:02,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:26:02,062 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:02,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:02,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:02,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:02,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109222180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:02,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:02,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109222181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:02,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:02,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109222194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:02,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:02,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109222194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:02,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:02,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109222195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:02,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-20T13:26:02,214 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:02,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-11-20T13:26:02,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:26:02,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:26:02,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:26:02,215 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:02,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:02,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:02,367 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:02,367 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-11-20T13:26:02,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:26:02,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:26:02,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:26:02,368 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:02,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:02,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:02,387 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=428 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/ade65092f23d4140bcf0b74e3669a658 2024-11-20T13:26:02,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/20033fa1608d4fe7999bba38b8856b5c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/20033fa1608d4fe7999bba38b8856b5c 2024-11-20T13:26:02,396 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/20033fa1608d4fe7999bba38b8856b5c, entries=200, sequenceid=428, filesize=14.4 K 2024-11-20T13:26:02,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/7929b418689c491ebb661e1885265537 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/7929b418689c491ebb661e1885265537 2024-11-20T13:26:02,400 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/7929b418689c491ebb661e1885265537, entries=150, 
sequenceid=428, filesize=12.0 K 2024-11-20T13:26:02,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/ade65092f23d4140bcf0b74e3669a658 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/ade65092f23d4140bcf0b74e3669a658 2024-11-20T13:26:02,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/ade65092f23d4140bcf0b74e3669a658, entries=150, sequenceid=428, filesize=12.0 K 2024-11-20T13:26:02,406 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 24db988c4fa8e1a0b1451e8c68b68697 in 870ms, sequenceid=428, compaction requested=true 2024-11-20T13:26:02,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:26:02,406 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:02,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:26:02,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:02,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:26:02,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:02,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:26:02,406 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:02,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:26:02,414 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40263 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:02,414 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/A is initiating minor compaction (all files) 2024-11-20T13:26:02,414 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/A in 
TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:26:02,414 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/ce3c50d7a01c41e0a14a4a2e9076a7d3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/93762813718a458a881ab3e82f876f4c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/20033fa1608d4fe7999bba38b8856b5c] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=39.3 K 2024-11-20T13:26:02,415 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:02,415 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/B is initiating minor compaction (all files) 2024-11-20T13:26:02,415 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/B in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:26:02,415 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/a27284d3e01344908074e09fcee3f71c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/0a5e167da42347e6802c98e9a6524096, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/7929b418689c491ebb661e1885265537] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=36.9 K 2024-11-20T13:26:02,415 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce3c50d7a01c41e0a14a4a2e9076a7d3, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1732109159406 2024-11-20T13:26:02,417 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting a27284d3e01344908074e09fcee3f71c, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1732109159406 2024-11-20T13:26:02,417 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93762813718a458a881ab3e82f876f4c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732109159767 2024-11-20T13:26:02,417 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a5e167da42347e6802c98e9a6524096, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732109159767 2024-11-20T13:26:02,417 
DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 20033fa1608d4fe7999bba38b8856b5c, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=428, earliestPutTs=1732109160915 2024-11-20T13:26:02,417 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 7929b418689c491ebb661e1885265537, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=428, earliestPutTs=1732109160921 2024-11-20T13:26:02,433 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#A#compaction#483 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:02,433 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/e915ed797abb4e2689621cd95cb61d76 is 50, key is test_row_0/A:col10/1732109160921/Put/seqid=0 2024-11-20T13:26:02,437 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#B#compaction#484 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:02,438 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/0540113c82234714bfb510941a57992c is 50, key is test_row_0/B:col10/1732109160921/Put/seqid=0 2024-11-20T13:26:02,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742391_1567 (size=13323) 2024-11-20T13:26:02,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742392_1568 (size=13323) 2024-11-20T13:26:02,480 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/e915ed797abb4e2689621cd95cb61d76 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/e915ed797abb4e2689621cd95cb61d76 2024-11-20T13:26:02,480 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/0540113c82234714bfb510941a57992c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/0540113c82234714bfb510941a57992c 2024-11-20T13:26:02,488 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/A of 24db988c4fa8e1a0b1451e8c68b68697 into e915ed797abb4e2689621cd95cb61d76(size=13.0 K), total size for store 
is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:26:02,488 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:26:02,488 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/A, priority=13, startTime=1732109162406; duration=0sec 2024-11-20T13:26:02,488 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:26:02,488 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:A 2024-11-20T13:26:02,488 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:02,492 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:02,492 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/C is initiating minor compaction (all files) 2024-11-20T13:26:02,492 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/C in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:26:02,492 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/B of 24db988c4fa8e1a0b1451e8c68b68697 into 0540113c82234714bfb510941a57992c(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:26:02,492 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:26:02,492 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/B, priority=13, startTime=1732109162406; duration=0sec 2024-11-20T13:26:02,492 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/43215b4e9d1a4343b1ec318c3a004583, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/c573fd03521e4381bfe6c2462bd2181e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/ade65092f23d4140bcf0b74e3669a658] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=36.9 K 2024-11-20T13:26:02,493 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:02,493 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:B 2024-11-20T13:26:02,493 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43215b4e9d1a4343b1ec318c3a004583, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1732109159406 2024-11-20T13:26:02,493 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting c573fd03521e4381bfe6c2462bd2181e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732109159767 2024-11-20T13:26:02,493 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting ade65092f23d4140bcf0b74e3669a658, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=428, earliestPutTs=1732109160921 2024-11-20T13:26:02,505 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#C#compaction#485 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:02,506 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/abb68bad8792450eac515b6fe8e8dc51 is 50, key is test_row_0/C:col10/1732109160921/Put/seqid=0 2024-11-20T13:26:02,520 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:02,520 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-11-20T13:26:02,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:26:02,521 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T13:26:02,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:26:02,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:02,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:26:02,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:02,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:26:02,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:02,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742393_1569 (size=13323) 2024-11-20T13:26:02,535 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/abb68bad8792450eac515b6fe8e8dc51 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/abb68bad8792450eac515b6fe8e8dc51 2024-11-20T13:26:02,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/bad00b8a8d344878b5d0cee9d47e41aa is 50, key is test_row_0/A:col10/1732109161568/Put/seqid=0 2024-11-20T13:26:02,542 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/C of 24db988c4fa8e1a0b1451e8c68b68697 into abb68bad8792450eac515b6fe8e8dc51(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:26:02,542 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:26:02,542 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/C, priority=13, startTime=1732109162406; duration=0sec 2024-11-20T13:26:02,542 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:02,542 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:C 2024-11-20T13:26:02,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742394_1570 (size=12301) 2024-11-20T13:26:02,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:26:02,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. as already flushing 2024-11-20T13:26:02,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-20T13:26:02,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:02,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109222706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:02,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:02,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109222706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:02,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:02,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109222708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:02,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:02,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109222708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:02,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:02,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109222709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:02,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:02,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109222811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:02,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:02,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109222811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:02,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:02,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109222814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:02,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:02,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109222815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:02,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:02,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109222815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:02,948 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=452 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/bad00b8a8d344878b5d0cee9d47e41aa 2024-11-20T13:26:02,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/76b93991f6e14ac4a026f843d403fa96 is 50, key is test_row_0/B:col10/1732109161568/Put/seqid=0 2024-11-20T13:26:02,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742395_1571 (size=12301) 2024-11-20T13:26:03,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:03,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109223013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:03,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:03,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109223014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:03,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:03,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109223017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:03,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:03,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109223018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:03,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:03,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109223019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:03,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:03,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109223317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:03,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:03,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109223318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:03,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:03,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109223324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:03,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:03,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109223325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:03,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:03,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109223325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:03,363 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=452 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/76b93991f6e14ac4a026f843d403fa96 2024-11-20T13:26:03,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/37a918d1775643e0a71647a4070d0b3e is 50, key is test_row_0/C:col10/1732109161568/Put/seqid=0 2024-11-20T13:26:03,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742396_1572 (size=12301) 2024-11-20T13:26:03,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-20T13:26:03,776 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=452 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/37a918d1775643e0a71647a4070d0b3e 2024-11-20T13:26:03,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/bad00b8a8d344878b5d0cee9d47e41aa as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/bad00b8a8d344878b5d0cee9d47e41aa
2024-11-20T13:26:03,783 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/bad00b8a8d344878b5d0cee9d47e41aa, entries=150, sequenceid=452, filesize=12.0 K
2024-11-20T13:26:03,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/76b93991f6e14ac4a026f843d403fa96 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/76b93991f6e14ac4a026f843d403fa96
2024-11-20T13:26:03,787 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/76b93991f6e14ac4a026f843d403fa96, entries=150, sequenceid=452, filesize=12.0 K
2024-11-20T13:26:03,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/37a918d1775643e0a71647a4070d0b3e as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/37a918d1775643e0a71647a4070d0b3e
2024-11-20T13:26:03,792 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/37a918d1775643e0a71647a4070d0b3e, entries=150, sequenceid=452, filesize=12.0 K
2024-11-20T13:26:03,793 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 24db988c4fa8e1a0b1451e8c68b68697 in 1272ms, sequenceid=452, compaction requested=false
2024-11-20T13:26:03,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697:
2024-11-20T13:26:03,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.
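Aside (not part of the recorded log): the records above show FlushRegionProcedure pid=153 writing one HFile per store (A, B, C), committing each from .tmp into the store directory, and closing the region operation, while concurrent writers are rejected with RegionTooBusyException because the memstore is over its blocking limit (512.0 K here, presumably the test's reduced hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier). The Java sketch below is a minimal, hypothetical client-side illustration only: it shows how a caller might request a table flush and back off when a put is rejected for this reason. The table, row, family and qualifier names are copied from the log for familiarity; the class name, retry counts and sleep values are assumptions, and the stock HBase client already retries such errors on its own (hbase.client.retries.number, hbase.client.pause).

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndBackoffSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         Table table = conn.getTable(tn)) {

      // Ask the master to flush the table; this is the kind of request that the
      // log records as FlushTableProcedure (pid=152) fanning out FlushRegionProcedure (pid=153).
      admin.flush(tn);

      // Write with an explicit backoff while the region reports it is over its
      // blocking memstore limit. Attempt count and sleep values are arbitrary.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put);
          break; // accepted once the in-flight flush has drained the memstore
        } catch (IOException ioe) {
          // The "Over memstore limit=512.0 K" condition may surface directly as
          // RegionTooBusyException or wrapped after the client's own retries.
          boolean tooBusy = ioe instanceof RegionTooBusyException
              || ioe.getCause() instanceof RegionTooBusyException;
          if (!tooBusy) {
            throw ioe;
          }
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000); // exponential backoff, capped
        }
      }
    }
  }
}

On the server side, the blocking threshold that produces these warnings is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; raising either, or spreading writes across more regions, reduces how often writers hit it.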
2024-11-20T13:26:03,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=153
2024-11-20T13:26:03,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=153
2024-11-20T13:26:03,796 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152
2024-11-20T13:26:03,796 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1910 sec
2024-11-20T13:26:03,798 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees in 2.1960 sec
2024-11-20T13:26:03,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697
2024-11-20T13:26:03,824 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB
2024-11-20T13:26:03,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A
2024-11-20T13:26:03,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T13:26:03,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B
2024-11-20T13:26:03,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T13:26:03,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C
2024-11-20T13:26:03,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T13:26:03,833 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/461a8a29000e47f3bed18d1fb17be29a is 50, key is test_row_0/A:col10/1732109162702/Put/seqid=0
2024-11-20T13:26:03,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742397_1573 (size=14741)
2024-11-20T13:26:03,838 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=468 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/461a8a29000e47f3bed18d1fb17be29a
2024-11-20T13:26:03,846 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/dd90fbd4a847466da52b5b5f94734151 is 50, key is test_row_0/B:col10/1732109162702/Put/seqid=0
2024-11-20T13:26:03,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to
blk_1073742398_1574 (size=12301) 2024-11-20T13:26:03,850 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=468 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/dd90fbd4a847466da52b5b5f94734151 2024-11-20T13:26:03,857 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/37960c7b80774a3b802c1953fb94923a is 50, key is test_row_0/C:col10/1732109162702/Put/seqid=0 2024-11-20T13:26:03,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742399_1575 (size=12301) 2024-11-20T13:26:03,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:03,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109223858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:03,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:03,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109223859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:03,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:03,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109223860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:03,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:03,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109223860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:03,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:03,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109223862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:03,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:03,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109223966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:03,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:03,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109223967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:03,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:03,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109223968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:03,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:03,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109223969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:03,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:03,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109223970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:04,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109224173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:04,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109224173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:04,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109224174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:04,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109224174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:04,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109224174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,263 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=468 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/37960c7b80774a3b802c1953fb94923a 2024-11-20T13:26:04,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/461a8a29000e47f3bed18d1fb17be29a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/461a8a29000e47f3bed18d1fb17be29a 2024-11-20T13:26:04,271 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/461a8a29000e47f3bed18d1fb17be29a, entries=200, sequenceid=468, filesize=14.4 K 2024-11-20T13:26:04,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/dd90fbd4a847466da52b5b5f94734151 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/dd90fbd4a847466da52b5b5f94734151 2024-11-20T13:26:04,275 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/dd90fbd4a847466da52b5b5f94734151, entries=150, sequenceid=468, filesize=12.0 K 2024-11-20T13:26:04,276 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/37960c7b80774a3b802c1953fb94923a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/37960c7b80774a3b802c1953fb94923a 2024-11-20T13:26:04,280 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/37960c7b80774a3b802c1953fb94923a, entries=150, sequenceid=468, filesize=12.0 K 2024-11-20T13:26:04,281 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 24db988c4fa8e1a0b1451e8c68b68697 in 457ms, sequenceid=468, compaction requested=true 2024-11-20T13:26:04,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:26:04,281 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:26:04,281 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:04,281 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:04,281 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:26:04,281 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:04,281 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:26:04,281 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:26:04,281 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:04,282 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40365 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:04,282 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:04,282 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/A is initiating minor compaction (all files) 2024-11-20T13:26:04,282 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/B is initiating minor compaction (all files) 2024-11-20T13:26:04,282 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/A in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
2024-11-20T13:26:04,282 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/B in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:26:04,282 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/e915ed797abb4e2689621cd95cb61d76, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/bad00b8a8d344878b5d0cee9d47e41aa, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/461a8a29000e47f3bed18d1fb17be29a] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=39.4 K 2024-11-20T13:26:04,282 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/0540113c82234714bfb510941a57992c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/76b93991f6e14ac4a026f843d403fa96, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/dd90fbd4a847466da52b5b5f94734151] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=37.0 K 2024-11-20T13:26:04,283 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting e915ed797abb4e2689621cd95cb61d76, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=428, earliestPutTs=1732109160921 2024-11-20T13:26:04,283 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 0540113c82234714bfb510941a57992c, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=428, earliestPutTs=1732109160921 2024-11-20T13:26:04,283 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 76b93991f6e14ac4a026f843d403fa96, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1732109161560 2024-11-20T13:26:04,283 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting dd90fbd4a847466da52b5b5f94734151, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=468, earliestPutTs=1732109162702 2024-11-20T13:26:04,284 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting bad00b8a8d344878b5d0cee9d47e41aa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1732109161560 2024-11-20T13:26:04,284 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 461a8a29000e47f3bed18d1fb17be29a, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=468, earliestPutTs=1732109162702 
2024-11-20T13:26:04,290 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#B#compaction#492 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:04,291 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/5cb51c4814ec42988837cafae76d7cf0 is 50, key is test_row_0/B:col10/1732109162702/Put/seqid=0 2024-11-20T13:26:04,306 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#A#compaction#493 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:04,307 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/d7c79a4509274310865e0663fb4cb2c3 is 50, key is test_row_0/A:col10/1732109162702/Put/seqid=0 2024-11-20T13:26:04,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742400_1576 (size=13425) 2024-11-20T13:26:04,323 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/5cb51c4814ec42988837cafae76d7cf0 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/5cb51c4814ec42988837cafae76d7cf0 2024-11-20T13:26:04,330 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/B of 24db988c4fa8e1a0b1451e8c68b68697 into 5cb51c4814ec42988837cafae76d7cf0(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:26:04,330 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:26:04,330 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/B, priority=13, startTime=1732109164281; duration=0sec 2024-11-20T13:26:04,330 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:26:04,330 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:B 2024-11-20T13:26:04,330 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:04,332 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:04,333 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/C is initiating minor compaction (all files) 2024-11-20T13:26:04,333 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/C in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:26:04,333 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/abb68bad8792450eac515b6fe8e8dc51, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/37a918d1775643e0a71647a4070d0b3e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/37960c7b80774a3b802c1953fb94923a] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=37.0 K 2024-11-20T13:26:04,333 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting abb68bad8792450eac515b6fe8e8dc51, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=428, earliestPutTs=1732109160921 2024-11-20T13:26:04,333 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 37a918d1775643e0a71647a4070d0b3e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1732109161560 2024-11-20T13:26:04,334 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 37960c7b80774a3b802c1953fb94923a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=468, earliestPutTs=1732109162702 2024-11-20T13:26:04,340 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
24db988c4fa8e1a0b1451e8c68b68697#C#compaction#494 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:04,342 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/4602acc8c28e4e4b81d0ef1112a353bd is 50, key is test_row_0/C:col10/1732109162702/Put/seqid=0 2024-11-20T13:26:04,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742401_1577 (size=13425) 2024-11-20T13:26:04,359 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/d7c79a4509274310865e0663fb4cb2c3 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/d7c79a4509274310865e0663fb4cb2c3 2024-11-20T13:26:04,365 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/A of 24db988c4fa8e1a0b1451e8c68b68697 into d7c79a4509274310865e0663fb4cb2c3(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:26:04,365 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:26:04,365 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/A, priority=13, startTime=1732109164281; duration=0sec 2024-11-20T13:26:04,365 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:04,365 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:A 2024-11-20T13:26:04,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742402_1578 (size=13425) 2024-11-20T13:26:04,392 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/4602acc8c28e4e4b81d0ef1112a353bd as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/4602acc8c28e4e4b81d0ef1112a353bd 2024-11-20T13:26:04,400 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/C of 24db988c4fa8e1a0b1451e8c68b68697 into 4602acc8c28e4e4b81d0ef1112a353bd(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:26:04,400 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:26:04,400 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/C, priority=13, startTime=1732109164281; duration=0sec 2024-11-20T13:26:04,400 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:04,400 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:C 2024-11-20T13:26:04,484 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T13:26:04,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:26:04,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:26:04,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:04,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:26:04,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:04,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:26:04,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:04,492 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/48d77827627246f691c5d1c174ff2922 is 50, key is test_row_0/A:col10/1732109163857/Put/seqid=0 2024-11-20T13:26:04,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:04,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109224495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,501 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:04,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109224495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:04,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109224497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,501 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:04,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109224498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:04,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109224499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742403_1579 (size=12301) 2024-11-20T13:26:04,514 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=496 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/48d77827627246f691c5d1c174ff2922 2024-11-20T13:26:04,527 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/b20f0e83e6804970a8b906e72d57e959 is 50, key is test_row_0/B:col10/1732109163857/Put/seqid=0 2024-11-20T13:26:04,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742404_1580 (size=12301) 2024-11-20T13:26:04,605 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:04,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109224602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,607 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:04,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109224603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,607 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:04,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109224603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:04,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109224603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:04,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109224603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:04,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109224807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:04,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 281 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109224808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:04,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109224809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:04,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109224810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:04,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109224810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:04,951 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=496 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/b20f0e83e6804970a8b906e72d57e959 2024-11-20T13:26:04,959 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/396930f66c52439b8fc68634eed50786 is 50, key is test_row_0/C:col10/1732109163857/Put/seqid=0 2024-11-20T13:26:04,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742405_1581 (size=12301) 2024-11-20T13:26:04,963 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=496 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/396930f66c52439b8fc68634eed50786 2024-11-20T13:26:04,967 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/48d77827627246f691c5d1c174ff2922 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/48d77827627246f691c5d1c174ff2922 2024-11-20T13:26:04,970 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/48d77827627246f691c5d1c174ff2922, entries=150, sequenceid=496, filesize=12.0 K 2024-11-20T13:26:04,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/b20f0e83e6804970a8b906e72d57e959 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/b20f0e83e6804970a8b906e72d57e959 2024-11-20T13:26:04,975 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/b20f0e83e6804970a8b906e72d57e959, entries=150, sequenceid=496, filesize=12.0 K 2024-11-20T13:26:04,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/396930f66c52439b8fc68634eed50786 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/396930f66c52439b8fc68634eed50786 2024-11-20T13:26:04,980 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/396930f66c52439b8fc68634eed50786, entries=150, sequenceid=496, filesize=12.0 K 2024-11-20T13:26:04,981 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 24db988c4fa8e1a0b1451e8c68b68697 in 497ms, sequenceid=496, compaction requested=false 2024-11-20T13:26:04,981 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:26:05,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:26:05,115 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T13:26:05,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:26:05,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:05,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:26:05,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:05,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:26:05,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:05,121 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/270ba06983774f9f84428e34ea2e0be5 is 50, key is test_row_1/A:col10/1732109165113/Put/seqid=0 2024-11-20T13:26:05,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742406_1582 (size=14737) 2024-11-20T13:26:05,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:05,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109225143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:05,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:05,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109225144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:05,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:05,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109225147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:05,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:05,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109225149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:05,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:05,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 288 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109225151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:05,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:05,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:05,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109225252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:05,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109225253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:05,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:05,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109225253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:05,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:05,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109225257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:05,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:05,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 290 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109225257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:05,419 DEBUG [Thread-2129 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6263bb7a to 127.0.0.1:53074 2024-11-20T13:26:05,419 DEBUG [Thread-2129 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:05,420 DEBUG [Thread-2133 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x30d04cd0 to 127.0.0.1:53074 2024-11-20T13:26:05,420 DEBUG [Thread-2133 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:05,420 DEBUG [Thread-2131 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12a8593f to 127.0.0.1:53074 2024-11-20T13:26:05,420 DEBUG [Thread-2131 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:05,425 DEBUG [Thread-2135 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10d34e60 to 127.0.0.1:53074 2024-11-20T13:26:05,425 DEBUG [Thread-2135 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:05,426 DEBUG [Thread-2137 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x33ef6df4 to 127.0.0.1:53074 2024-11-20T13:26:05,426 DEBUG [Thread-2137 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:05,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:05,459 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:05,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109225459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:05,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109225459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:05,460 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:05,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109225460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:05,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:05,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109225460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:05,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:05,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 292 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109225463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:05,534 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=509 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/270ba06983774f9f84428e34ea2e0be5 2024-11-20T13:26:05,540 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/3a4870618c9d47ca9a3648598361cf98 is 50, key is test_row_1/B:col10/1732109165113/Put/seqid=0 2024-11-20T13:26:05,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742407_1583 (size=9857) 2024-11-20T13:26:05,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-20T13:26:05,707 INFO [Thread-2128 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 152 completed 2024-11-20T13:26:05,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:05,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109225761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:05,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:05,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109225762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:05,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:05,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:26:05,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109225762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:26:05,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109225762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:26:05,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:26:05,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 294 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109225765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:26:05,943 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=509 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/3a4870618c9d47ca9a3648598361cf98
2024-11-20T13:26:05,949 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/bbe60fb613fb48099a581c40db3326f1 is 50, key is test_row_1/C:col10/1732109165113/Put/seqid=0
2024-11-20T13:26:05,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742408_1584 (size=9857)
2024-11-20T13:26:06,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:26:06,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1732109226263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:26:06,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:26:06,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45414 deadline: 1732109226264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:26:06,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:26:06,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45360 deadline: 1732109226266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:26:06,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T13:26:06,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45370 deadline: 1732109226267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137
2024-11-20T13:26:06,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:06,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 296 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45390 deadline: 1732109226270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:06,353 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=509 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/bbe60fb613fb48099a581c40db3326f1 2024-11-20T13:26:06,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/270ba06983774f9f84428e34ea2e0be5 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/270ba06983774f9f84428e34ea2e0be5 2024-11-20T13:26:06,359 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/270ba06983774f9f84428e34ea2e0be5, entries=200, sequenceid=509, filesize=14.4 K 2024-11-20T13:26:06,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/3a4870618c9d47ca9a3648598361cf98 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/3a4870618c9d47ca9a3648598361cf98 2024-11-20T13:26:06,363 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/3a4870618c9d47ca9a3648598361cf98, entries=100, sequenceid=509, filesize=9.6 K 2024-11-20T13:26:06,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/bbe60fb613fb48099a581c40db3326f1 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/bbe60fb613fb48099a581c40db3326f1 2024-11-20T13:26:06,366 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/bbe60fb613fb48099a581c40db3326f1, entries=100, sequenceid=509, filesize=9.6 K 2024-11-20T13:26:06,367 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 24db988c4fa8e1a0b1451e8c68b68697 in 1252ms, sequenceid=509, compaction requested=true 2024-11-20T13:26:06,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:26:06,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:26:06,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:06,367 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:06,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:26:06,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:06,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 24db988c4fa8e1a0b1451e8c68b68697:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:26:06,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:26:06,367 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:06,368 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40463 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:06,368 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/A is initiating minor 
compaction (all files) 2024-11-20T13:26:06,368 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/A in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:26:06,368 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35583 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:06,368 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/d7c79a4509274310865e0663fb4cb2c3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/48d77827627246f691c5d1c174ff2922, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/270ba06983774f9f84428e34ea2e0be5] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=39.5 K 2024-11-20T13:26:06,368 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/B is initiating minor compaction (all files) 2024-11-20T13:26:06,368 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/B in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
2024-11-20T13:26:06,368 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/5cb51c4814ec42988837cafae76d7cf0, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/b20f0e83e6804970a8b906e72d57e959, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/3a4870618c9d47ca9a3648598361cf98] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=34.7 K 2024-11-20T13:26:06,369 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7c79a4509274310865e0663fb4cb2c3, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=468, earliestPutTs=1732109162702 2024-11-20T13:26:06,369 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 5cb51c4814ec42988837cafae76d7cf0, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=468, earliestPutTs=1732109162702 2024-11-20T13:26:06,369 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48d77827627246f691c5d1c174ff2922, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=496, earliestPutTs=1732109163857 2024-11-20T13:26:06,369 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting b20f0e83e6804970a8b906e72d57e959, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=496, earliestPutTs=1732109163857 2024-11-20T13:26:06,369 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 270ba06983774f9f84428e34ea2e0be5, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=509, earliestPutTs=1732109164495 2024-11-20T13:26:06,369 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a4870618c9d47ca9a3648598361cf98, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=509, earliestPutTs=1732109165113 2024-11-20T13:26:06,376 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#B#compaction#501 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:06,377 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/685c48fa401243a18a3ce7e01868831b is 50, key is test_row_0/B:col10/1732109163857/Put/seqid=0 2024-11-20T13:26:06,377 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#A#compaction#502 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:06,377 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/9c47dadcb09d4bb2957ff65907ffcd45 is 50, key is test_row_0/A:col10/1732109163857/Put/seqid=0 2024-11-20T13:26:06,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742409_1585 (size=13527) 2024-11-20T13:26:06,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742410_1586 (size=13527) 2024-11-20T13:26:06,384 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/685c48fa401243a18a3ce7e01868831b as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/685c48fa401243a18a3ce7e01868831b 2024-11-20T13:26:06,388 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/B of 24db988c4fa8e1a0b1451e8c68b68697 into 685c48fa401243a18a3ce7e01868831b(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:26:06,388 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:26:06,388 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/B, priority=13, startTime=1732109166367; duration=0sec 2024-11-20T13:26:06,388 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:26:06,388 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:B 2024-11-20T13:26:06,388 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:06,389 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35583 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:06,389 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): 24db988c4fa8e1a0b1451e8c68b68697/C is initiating minor compaction (all files) 2024-11-20T13:26:06,389 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 24db988c4fa8e1a0b1451e8c68b68697/C in TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
2024-11-20T13:26:06,389 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/4602acc8c28e4e4b81d0ef1112a353bd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/396930f66c52439b8fc68634eed50786, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/bbe60fb613fb48099a581c40db3326f1] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp, totalSize=34.7 K 2024-11-20T13:26:06,389 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 4602acc8c28e4e4b81d0ef1112a353bd, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=468, earliestPutTs=1732109162702 2024-11-20T13:26:06,389 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 396930f66c52439b8fc68634eed50786, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=496, earliestPutTs=1732109163857 2024-11-20T13:26:06,390 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting bbe60fb613fb48099a581c40db3326f1, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=509, earliestPutTs=1732109165113 2024-11-20T13:26:06,396 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 24db988c4fa8e1a0b1451e8c68b68697#C#compaction#503 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:06,396 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/49e064ff83e847628e725d1df44d4958 is 50, key is test_row_0/C:col10/1732109163857/Put/seqid=0 2024-11-20T13:26:06,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742411_1587 (size=13527) 2024-11-20T13:26:06,785 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/9c47dadcb09d4bb2957ff65907ffcd45 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/9c47dadcb09d4bb2957ff65907ffcd45 2024-11-20T13:26:06,789 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/A of 24db988c4fa8e1a0b1451e8c68b68697 into 9c47dadcb09d4bb2957ff65907ffcd45(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:26:06,789 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:26:06,789 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/A, priority=13, startTime=1732109166367; duration=0sec 2024-11-20T13:26:06,789 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:06,789 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:A 2024-11-20T13:26:06,803 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/49e064ff83e847628e725d1df44d4958 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/49e064ff83e847628e725d1df44d4958 2024-11-20T13:26:06,807 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 24db988c4fa8e1a0b1451e8c68b68697/C of 24db988c4fa8e1a0b1451e8c68b68697 into 49e064ff83e847628e725d1df44d4958(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:26:06,807 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:26:06,807 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697., storeName=24db988c4fa8e1a0b1451e8c68b68697/C, priority=13, startTime=1732109166367; duration=0sec 2024-11-20T13:26:06,807 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:06,807 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 24db988c4fa8e1a0b1451e8c68b68697:C 2024-11-20T13:26:07,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:26:07,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T13:26:07,269 DEBUG [Thread-2120 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x01b5ec79 to 127.0.0.1:53074 2024-11-20T13:26:07,269 DEBUG [Thread-2120 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:07,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:26:07,270 DEBUG [Thread-2124 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x131834fc to 127.0.0.1:53074 2024-11-20T13:26:07,270 DEBUG 
[Thread-2124 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:07,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:07,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:26:07,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:07,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:26:07,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:07,271 DEBUG [Thread-2118 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1baebee3 to 127.0.0.1:53074 2024-11-20T13:26:07,271 DEBUG [Thread-2118 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:07,274 DEBUG [Thread-2122 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51237850 to 127.0.0.1:53074 2024-11-20T13:26:07,274 DEBUG [Thread-2122 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:07,275 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/3421051760d34f8996a64170b1f4112a is 50, key is test_row_0/A:col10/1732109167268/Put/seqid=0 2024-11-20T13:26:07,277 DEBUG [Thread-2126 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x579a9390 to 127.0.0.1:53074 2024-11-20T13:26:07,277 DEBUG [Thread-2126 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:07,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-11-20T13:26:07,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 81
2024-11-20T13:26:07,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 59
2024-11-20T13:26:07,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 99
2024-11-20T13:26:07,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51
2024-11-20T13:26:07,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 135
2024-11-20T13:26:07,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-20T13:26:07,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-20T13:26:07,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2301
2024-11-20T13:26:07,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6903 rows
2024-11-20T13:26:07,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2289
2024-11-20T13:26:07,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6867 rows
2024-11-20T13:26:07,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2290
2024-11-20T13:26:07,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6870 rows
2024-11-20T13:26:07,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2304
2024-11-20T13:26:07,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6912 rows
2024-11-20T13:26:07,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2288
2024-11-20T13:26:07,277 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6864 rows
2024-11-20T13:26:07,277 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-20T13:26:07,277 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x373cbe4b to 127.0.0.1:53074
2024-11-20T13:26:07,277 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T13:26:07,279 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-20T13:26:07,279 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-20T13:26:07,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-20T13:26:07,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154
2024-11-20T13:26:07,286 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109167285"}]},"ts":"1732109167285"}
2024-11-20T13:26:07,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742412_1588 (size=12301)
2024-11-20T13:26:07,287 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-20T13:26:07,287 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=537 (bloomFilter=true),
to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/3421051760d34f8996a64170b1f4112a 2024-11-20T13:26:07,289 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T13:26:07,290 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T13:26:07,291 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=24db988c4fa8e1a0b1451e8c68b68697, UNASSIGN}] 2024-11-20T13:26:07,292 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=24db988c4fa8e1a0b1451e8c68b68697, UNASSIGN 2024-11-20T13:26:07,293 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=24db988c4fa8e1a0b1451e8c68b68697, regionState=CLOSING, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:07,294 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T13:26:07,294 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; CloseRegionProcedure 24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137}] 2024-11-20T13:26:07,297 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/dab8f64a74984ab4bea2259d932c4fca is 50, key is test_row_0/B:col10/1732109167268/Put/seqid=0 2024-11-20T13:26:07,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742413_1589 (size=12301) 2024-11-20T13:26:07,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-20T13:26:07,446 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:07,447 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(124): Close 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:26:07,447 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T13:26:07,447 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1681): Closing 24db988c4fa8e1a0b1451e8c68b68697, disabling compactions & flushes 2024-11-20T13:26:07,447 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
2024-11-20T13:26:07,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-20T13:26:07,701 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=537 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/dab8f64a74984ab4bea2259d932c4fca 2024-11-20T13:26:07,707 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/b7b9e013432f4c64bb5a0731a934084a is 50, key is test_row_0/C:col10/1732109167268/Put/seqid=0 2024-11-20T13:26:07,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742414_1590 (size=12301) 2024-11-20T13:26:07,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-20T13:26:08,111 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=537 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/b7b9e013432f4c64bb5a0731a934084a 2024-11-20T13:26:08,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/3421051760d34f8996a64170b1f4112a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/3421051760d34f8996a64170b1f4112a 2024-11-20T13:26:08,117 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/3421051760d34f8996a64170b1f4112a, entries=150, sequenceid=537, filesize=12.0 K 2024-11-20T13:26:08,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/dab8f64a74984ab4bea2259d932c4fca as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/dab8f64a74984ab4bea2259d932c4fca 2024-11-20T13:26:08,120 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/dab8f64a74984ab4bea2259d932c4fca, entries=150, sequenceid=537, filesize=12.0 K 2024-11-20T13:26:08,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/b7b9e013432f4c64bb5a0731a934084a as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/b7b9e013432f4c64bb5a0731a934084a 2024-11-20T13:26:08,122 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/b7b9e013432f4c64bb5a0731a934084a, entries=150, sequenceid=537, filesize=12.0 K 2024-11-20T13:26:08,123 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=20.13 KB/20610 for 24db988c4fa8e1a0b1451e8c68b68697 in 854ms, sequenceid=537, compaction requested=false 2024-11-20T13:26:08,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:26:08,123 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:26:08,123 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 2024-11-20T13:26:08,123 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. after waiting 0 ms 2024-11-20T13:26:08,123 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
2024-11-20T13:26:08,123 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(2837): Flushing 24db988c4fa8e1a0b1451e8c68b68697 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-20T13:26:08,123 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=A 2024-11-20T13:26:08,123 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:08,123 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=B 2024-11-20T13:26:08,124 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:08,124 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 24db988c4fa8e1a0b1451e8c68b68697, store=C 2024-11-20T13:26:08,124 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:08,126 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/e33d7bdc7f57407c97e6128dbe9aa69c is 50, key is test_row_0/A:col10/1732109167276/Put/seqid=0 2024-11-20T13:26:08,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742415_1591 (size=12301) 2024-11-20T13:26:08,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-20T13:26:08,530 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=543 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/e33d7bdc7f57407c97e6128dbe9aa69c 2024-11-20T13:26:08,536 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/d110fb2ec4ba48eeab5e180945f738b4 is 50, key is test_row_0/B:col10/1732109167276/Put/seqid=0 2024-11-20T13:26:08,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742416_1592 (size=12301) 2024-11-20T13:26:08,940 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=543 (bloomFilter=true), 
to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/d110fb2ec4ba48eeab5e180945f738b4 2024-11-20T13:26:08,946 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/fb33887b4b3a4e558e7880aaf6345a5d is 50, key is test_row_0/C:col10/1732109167276/Put/seqid=0 2024-11-20T13:26:08,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742417_1593 (size=12301) 2024-11-20T13:26:09,350 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=543 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/fb33887b4b3a4e558e7880aaf6345a5d 2024-11-20T13:26:09,354 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/A/e33d7bdc7f57407c97e6128dbe9aa69c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/e33d7bdc7f57407c97e6128dbe9aa69c 2024-11-20T13:26:09,357 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/e33d7bdc7f57407c97e6128dbe9aa69c, entries=150, sequenceid=543, filesize=12.0 K 2024-11-20T13:26:09,357 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/B/d110fb2ec4ba48eeab5e180945f738b4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/d110fb2ec4ba48eeab5e180945f738b4 2024-11-20T13:26:09,360 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/d110fb2ec4ba48eeab5e180945f738b4, entries=150, sequenceid=543, filesize=12.0 K 2024-11-20T13:26:09,360 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/.tmp/C/fb33887b4b3a4e558e7880aaf6345a5d as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/fb33887b4b3a4e558e7880aaf6345a5d 2024-11-20T13:26:09,364 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/fb33887b4b3a4e558e7880aaf6345a5d, entries=150, sequenceid=543, filesize=12.0 K 2024-11-20T13:26:09,364 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 24db988c4fa8e1a0b1451e8c68b68697 in 1241ms, sequenceid=543, compaction requested=true 2024-11-20T13:26:09,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/7fd46ae62fdb4525b31116511aa09b3b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/6881109040984ec484332e67d3905073, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/08adb0962e234fad95573cf8d13e5719, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/c2d15be7dacc49a6bc328890fea2ea17, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2306082589a047e69f9cfadabeb5b041, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/3d064d21581a4f2f9dafe1c8b1ef5040, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/d3d948a3f3124ce6a4c625585d6f18d7, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2abc462393dc49eea34ac25eab9a91f8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/1c906134a59d4e7faf252754fe118ab9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/d6be1c2c892b443e92aaae103294ed1c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/fbc47ce021b7450684f8c72202cf668d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/1cc9a44def8449a3b1ff04bb3f921700, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/e0c3fc96472c4e058da5421b73ccb779, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/08339bdae526402f9ec3a36a78067a15, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/a538056270cf453da256a4a712924bd5, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/c4c6940f3dc04d57a15ad763d97bfa5d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/7e3a11fe32cd4be5ba6d2cb819c252db, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/251cb6810656474da79ea12cd8c13211, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/fb53bbb8a9cf41269e0a23325a2a51cc, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/6acb498247d7428eb57f2a44136ef781, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2372a5e564ee40c995c2a6b6cd217d44, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/b945c28075064b748d919ac0e4480a3b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2de774e9ea6e4b868f4e87fe0fe91bd4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/63c352f9ef4843d4ac2d19357f3e5128, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/7d4f72ddd2be42cab5d39e0004b72355, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/b3863c0d3dde426c81edc2c8e2f942f1, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/a572f769238448e7820c8df3e7f4b69b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/ce3c50d7a01c41e0a14a4a2e9076a7d3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/93762813718a458a881ab3e82f876f4c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/20033fa1608d4fe7999bba38b8856b5c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/e915ed797abb4e2689621cd95cb61d76, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/bad00b8a8d344878b5d0cee9d47e41aa, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/461a8a29000e47f3bed18d1fb17be29a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/d7c79a4509274310865e0663fb4cb2c3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/48d77827627246f691c5d1c174ff2922, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/270ba06983774f9f84428e34ea2e0be5] to archive 2024-11-20T13:26:09,366 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T13:26:09,367 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/7fd46ae62fdb4525b31116511aa09b3b to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/7fd46ae62fdb4525b31116511aa09b3b 2024-11-20T13:26:09,368 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/6881109040984ec484332e67d3905073 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/6881109040984ec484332e67d3905073 2024-11-20T13:26:09,369 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/08adb0962e234fad95573cf8d13e5719 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/08adb0962e234fad95573cf8d13e5719 2024-11-20T13:26:09,370 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/c2d15be7dacc49a6bc328890fea2ea17 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/c2d15be7dacc49a6bc328890fea2ea17 2024-11-20T13:26:09,371 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2306082589a047e69f9cfadabeb5b041 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2306082589a047e69f9cfadabeb5b041 2024-11-20T13:26:09,372 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/3d064d21581a4f2f9dafe1c8b1ef5040 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/3d064d21581a4f2f9dafe1c8b1ef5040 2024-11-20T13:26:09,373 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/d3d948a3f3124ce6a4c625585d6f18d7 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/d3d948a3f3124ce6a4c625585d6f18d7 2024-11-20T13:26:09,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2abc462393dc49eea34ac25eab9a91f8 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2abc462393dc49eea34ac25eab9a91f8 2024-11-20T13:26:09,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/1c906134a59d4e7faf252754fe118ab9 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/1c906134a59d4e7faf252754fe118ab9 2024-11-20T13:26:09,375 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/d6be1c2c892b443e92aaae103294ed1c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/d6be1c2c892b443e92aaae103294ed1c 2024-11-20T13:26:09,376 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/fbc47ce021b7450684f8c72202cf668d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/fbc47ce021b7450684f8c72202cf668d 2024-11-20T13:26:09,377 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/1cc9a44def8449a3b1ff04bb3f921700 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/1cc9a44def8449a3b1ff04bb3f921700 2024-11-20T13:26:09,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/e0c3fc96472c4e058da5421b73ccb779 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/e0c3fc96472c4e058da5421b73ccb779 2024-11-20T13:26:09,379 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/08339bdae526402f9ec3a36a78067a15 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/08339bdae526402f9ec3a36a78067a15 2024-11-20T13:26:09,380 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/a538056270cf453da256a4a712924bd5 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/a538056270cf453da256a4a712924bd5 2024-11-20T13:26:09,381 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/c4c6940f3dc04d57a15ad763d97bfa5d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/c4c6940f3dc04d57a15ad763d97bfa5d 2024-11-20T13:26:09,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/7e3a11fe32cd4be5ba6d2cb819c252db to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/7e3a11fe32cd4be5ba6d2cb819c252db 2024-11-20T13:26:09,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/251cb6810656474da79ea12cd8c13211 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/251cb6810656474da79ea12cd8c13211 2024-11-20T13:26:09,383 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/fb53bbb8a9cf41269e0a23325a2a51cc to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/fb53bbb8a9cf41269e0a23325a2a51cc 2024-11-20T13:26:09,384 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/6acb498247d7428eb57f2a44136ef781 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/6acb498247d7428eb57f2a44136ef781 2024-11-20T13:26:09,385 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2372a5e564ee40c995c2a6b6cd217d44 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2372a5e564ee40c995c2a6b6cd217d44 2024-11-20T13:26:09,386 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/b945c28075064b748d919ac0e4480a3b to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/b945c28075064b748d919ac0e4480a3b 2024-11-20T13:26:09,386 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2de774e9ea6e4b868f4e87fe0fe91bd4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/2de774e9ea6e4b868f4e87fe0fe91bd4 2024-11-20T13:26:09,387 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/63c352f9ef4843d4ac2d19357f3e5128 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/63c352f9ef4843d4ac2d19357f3e5128 2024-11-20T13:26:09,388 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/7d4f72ddd2be42cab5d39e0004b72355 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/7d4f72ddd2be42cab5d39e0004b72355 2024-11-20T13:26:09,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-20T13:26:09,390 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/b3863c0d3dde426c81edc2c8e2f942f1 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/b3863c0d3dde426c81edc2c8e2f942f1 2024-11-20T13:26:09,391 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/a572f769238448e7820c8df3e7f4b69b to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/a572f769238448e7820c8df3e7f4b69b 2024-11-20T13:26:09,391 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/ce3c50d7a01c41e0a14a4a2e9076a7d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/ce3c50d7a01c41e0a14a4a2e9076a7d3 2024-11-20T13:26:09,392 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/93762813718a458a881ab3e82f876f4c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/93762813718a458a881ab3e82f876f4c 2024-11-20T13:26:09,393 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/20033fa1608d4fe7999bba38b8856b5c to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/20033fa1608d4fe7999bba38b8856b5c 2024-11-20T13:26:09,394 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/e915ed797abb4e2689621cd95cb61d76 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/e915ed797abb4e2689621cd95cb61d76 2024-11-20T13:26:09,395 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/bad00b8a8d344878b5d0cee9d47e41aa to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/bad00b8a8d344878b5d0cee9d47e41aa 2024-11-20T13:26:09,396 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/461a8a29000e47f3bed18d1fb17be29a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/461a8a29000e47f3bed18d1fb17be29a 2024-11-20T13:26:09,397 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/d7c79a4509274310865e0663fb4cb2c3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/d7c79a4509274310865e0663fb4cb2c3 2024-11-20T13:26:09,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/48d77827627246f691c5d1c174ff2922 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/48d77827627246f691c5d1c174ff2922 2024-11-20T13:26:09,399 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/270ba06983774f9f84428e34ea2e0be5 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/270ba06983774f9f84428e34ea2e0be5 2024-11-20T13:26:09,400 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/9f51919d0c1e41748bb1fc2df04a0c92, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/1e8592d9d2ae4187a06c517e95bee0e4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/4c934287fcaf4e20998d2db860f3eb6e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/3db8ec8a39514384ad2b5b24a7c70e23, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/4c8c2c92563b491e8f6820ad2c9ffdda, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/e5ff809939c9404aaf37a59e0b5ed2d0, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/7d9ac50e8f66461fae633b150ffdc9ed, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/37b0c0001b504b508e4fd37261c11775, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/80353f5555c4421d9137e3da98208d95, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/47e2ff099e244fa6b8bca49b42874bdc, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/b5b9637b29f44e3b98cdf3ec08bff2af, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/0a3c01beae564409a222ad584c1d9573, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/19013e59355b49bc90cf16add05a1a27, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/bf073f2b54194d418cc8e6efaac88f43, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/2405a5dc4a95406599220981b83f47d3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/55b8bb98d802419f9be8a885a1f5a4c9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/c95924044848465a9f1b96d63f504afb, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/5b11d7746095487bab6947619497ab8f, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/a5c2bcf0b5a94f4290d50777c4472c4d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/d4b6ea62f7bd4cf79777f7a184bf5289, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/9874e2454a774fc5a48241e5fde35aa9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/f2c85e6e067445589fac057d47c69e06, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/d45f27ba47614c44beca21ba410dcd39, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/c36fefc164554a6d99a6021300952d38, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/a790ccff67d84c88994d58ea19d9fdfa, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/80f441647ef64911b8c8a9da5712b30d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/a27284d3e01344908074e09fcee3f71c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/9943adf9310a4e20977fda70b5ca4064, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/0a5e167da42347e6802c98e9a6524096, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/0540113c82234714bfb510941a57992c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/7929b418689c491ebb661e1885265537, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/76b93991f6e14ac4a026f843d403fa96, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/5cb51c4814ec42988837cafae76d7cf0, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/dd90fbd4a847466da52b5b5f94734151, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/b20f0e83e6804970a8b906e72d57e959, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/3a4870618c9d47ca9a3648598361cf98] to archive 2024-11-20T13:26:09,401 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T13:26:09,402 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/9f51919d0c1e41748bb1fc2df04a0c92 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/9f51919d0c1e41748bb1fc2df04a0c92 2024-11-20T13:26:09,403 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/1e8592d9d2ae4187a06c517e95bee0e4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/1e8592d9d2ae4187a06c517e95bee0e4 2024-11-20T13:26:09,404 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/4c934287fcaf4e20998d2db860f3eb6e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/4c934287fcaf4e20998d2db860f3eb6e 2024-11-20T13:26:09,405 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/3db8ec8a39514384ad2b5b24a7c70e23 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/3db8ec8a39514384ad2b5b24a7c70e23 2024-11-20T13:26:09,405 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/4c8c2c92563b491e8f6820ad2c9ffdda to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/4c8c2c92563b491e8f6820ad2c9ffdda 2024-11-20T13:26:09,406 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/e5ff809939c9404aaf37a59e0b5ed2d0 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/e5ff809939c9404aaf37a59e0b5ed2d0 2024-11-20T13:26:09,407 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/7d9ac50e8f66461fae633b150ffdc9ed to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/7d9ac50e8f66461fae633b150ffdc9ed 2024-11-20T13:26:09,408 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/37b0c0001b504b508e4fd37261c11775 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/37b0c0001b504b508e4fd37261c11775 2024-11-20T13:26:09,409 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/80353f5555c4421d9137e3da98208d95 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/80353f5555c4421d9137e3da98208d95 2024-11-20T13:26:09,410 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/47e2ff099e244fa6b8bca49b42874bdc to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/47e2ff099e244fa6b8bca49b42874bdc 2024-11-20T13:26:09,411 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/b5b9637b29f44e3b98cdf3ec08bff2af to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/b5b9637b29f44e3b98cdf3ec08bff2af 2024-11-20T13:26:09,413 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/0a3c01beae564409a222ad584c1d9573 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/0a3c01beae564409a222ad584c1d9573 2024-11-20T13:26:09,414 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/19013e59355b49bc90cf16add05a1a27 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/19013e59355b49bc90cf16add05a1a27 2024-11-20T13:26:09,415 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/bf073f2b54194d418cc8e6efaac88f43 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/bf073f2b54194d418cc8e6efaac88f43 2024-11-20T13:26:09,416 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/2405a5dc4a95406599220981b83f47d3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/2405a5dc4a95406599220981b83f47d3 2024-11-20T13:26:09,419 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/55b8bb98d802419f9be8a885a1f5a4c9 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/55b8bb98d802419f9be8a885a1f5a4c9 2024-11-20T13:26:09,420 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/c95924044848465a9f1b96d63f504afb to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/c95924044848465a9f1b96d63f504afb 2024-11-20T13:26:09,421 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/5b11d7746095487bab6947619497ab8f to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/5b11d7746095487bab6947619497ab8f 2024-11-20T13:26:09,422 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/a5c2bcf0b5a94f4290d50777c4472c4d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/a5c2bcf0b5a94f4290d50777c4472c4d 2024-11-20T13:26:09,423 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/d4b6ea62f7bd4cf79777f7a184bf5289 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/d4b6ea62f7bd4cf79777f7a184bf5289 2024-11-20T13:26:09,425 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/9874e2454a774fc5a48241e5fde35aa9 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/9874e2454a774fc5a48241e5fde35aa9 2024-11-20T13:26:09,426 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/f2c85e6e067445589fac057d47c69e06 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/f2c85e6e067445589fac057d47c69e06 2024-11-20T13:26:09,427 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/d45f27ba47614c44beca21ba410dcd39 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/d45f27ba47614c44beca21ba410dcd39 2024-11-20T13:26:09,429 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/c36fefc164554a6d99a6021300952d38 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/c36fefc164554a6d99a6021300952d38 2024-11-20T13:26:09,432 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/a790ccff67d84c88994d58ea19d9fdfa to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/a790ccff67d84c88994d58ea19d9fdfa 2024-11-20T13:26:09,433 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/80f441647ef64911b8c8a9da5712b30d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/80f441647ef64911b8c8a9da5712b30d 2024-11-20T13:26:09,434 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/a27284d3e01344908074e09fcee3f71c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/a27284d3e01344908074e09fcee3f71c 2024-11-20T13:26:09,435 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/9943adf9310a4e20977fda70b5ca4064 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/9943adf9310a4e20977fda70b5ca4064 2024-11-20T13:26:09,436 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/0a5e167da42347e6802c98e9a6524096 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/0a5e167da42347e6802c98e9a6524096 2024-11-20T13:26:09,437 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/0540113c82234714bfb510941a57992c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/0540113c82234714bfb510941a57992c 2024-11-20T13:26:09,438 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/7929b418689c491ebb661e1885265537 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/7929b418689c491ebb661e1885265537 2024-11-20T13:26:09,439 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/76b93991f6e14ac4a026f843d403fa96 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/76b93991f6e14ac4a026f843d403fa96 2024-11-20T13:26:09,440 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/5cb51c4814ec42988837cafae76d7cf0 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/5cb51c4814ec42988837cafae76d7cf0 2024-11-20T13:26:09,440 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/dd90fbd4a847466da52b5b5f94734151 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/dd90fbd4a847466da52b5b5f94734151 2024-11-20T13:26:09,441 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/b20f0e83e6804970a8b906e72d57e959 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/b20f0e83e6804970a8b906e72d57e959 2024-11-20T13:26:09,442 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/3a4870618c9d47ca9a3648598361cf98 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/3a4870618c9d47ca9a3648598361cf98 2024-11-20T13:26:09,443 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/f48be3db5b8141e8bda297d80bed7dfa, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/dd6571704e494a0d877639e50c0088be, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/d8a43a68c16246eaae422c0e870e9e61, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/67bb5e5857884960aff3f5ec272abce4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/bcf6a7ceca2d4f89b6a966eb0b596ea8, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/2342bb2b7c50446d9be5d58f6b07caa3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/dd508aab823846759432be4a2a254354, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/12a511d6e2d8486b8101467303a71cbd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/3545c9d5c9f345fc9b03ab1a80f43bec, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/a2d3450c64a146b1bb32f47119a15df9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/84d813dee8f8465c90dcb71129140f8c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/19a1fab2bb6e4c01a5b92c64bfda95b0, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/2e3bc0e0f1a948fda62731ac8c2eb295, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/6af369bc751149c6b661052b2881ee58, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/817baf0ba65042648d32a84a32d6caeb, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/1ba25dcd0e1849a3b54d9ba0e908dafe, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/5e501b8f8389463eb88875115144fbb3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/0b9aadb13d9d427faf3af503b6a5c969, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/dff161644bce4059ae8ef91ca54cbc6b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/0a3e68140293407c85478108332453a4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/3820e06aca9444fcacb345a916c3d91e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/6de00098c3024959a37c844fd3e1911f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/ee0927e1f8a342e4a0aafa731d180fc9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/3fd25629c25b4c30897d2c6906e8b375, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/c36a79ddbe1a4618b1fdb04b8157d9bf, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/4e3b57003a194f8bb589bdb8bbcaba48, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/43215b4e9d1a4343b1ec318c3a004583, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/0cd885729def44599be60f9fd70cdf0b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/c573fd03521e4381bfe6c2462bd2181e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/abb68bad8792450eac515b6fe8e8dc51, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/ade65092f23d4140bcf0b74e3669a658, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/37a918d1775643e0a71647a4070d0b3e, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/4602acc8c28e4e4b81d0ef1112a353bd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/37960c7b80774a3b802c1953fb94923a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/396930f66c52439b8fc68634eed50786, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/bbe60fb613fb48099a581c40db3326f1] to archive 2024-11-20T13:26:09,444 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T13:26:09,446 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/f48be3db5b8141e8bda297d80bed7dfa to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/f48be3db5b8141e8bda297d80bed7dfa 2024-11-20T13:26:09,447 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/dd6571704e494a0d877639e50c0088be to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/dd6571704e494a0d877639e50c0088be 2024-11-20T13:26:09,448 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/d8a43a68c16246eaae422c0e870e9e61 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/d8a43a68c16246eaae422c0e870e9e61 2024-11-20T13:26:09,450 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/67bb5e5857884960aff3f5ec272abce4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/67bb5e5857884960aff3f5ec272abce4 2024-11-20T13:26:09,451 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/bcf6a7ceca2d4f89b6a966eb0b596ea8 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/bcf6a7ceca2d4f89b6a966eb0b596ea8 2024-11-20T13:26:09,452 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/2342bb2b7c50446d9be5d58f6b07caa3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/2342bb2b7c50446d9be5d58f6b07caa3 2024-11-20T13:26:09,453 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/dd508aab823846759432be4a2a254354 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/dd508aab823846759432be4a2a254354 2024-11-20T13:26:09,454 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/12a511d6e2d8486b8101467303a71cbd to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/12a511d6e2d8486b8101467303a71cbd 2024-11-20T13:26:09,454 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/3545c9d5c9f345fc9b03ab1a80f43bec to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/3545c9d5c9f345fc9b03ab1a80f43bec 2024-11-20T13:26:09,456 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/a2d3450c64a146b1bb32f47119a15df9 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/a2d3450c64a146b1bb32f47119a15df9 2024-11-20T13:26:09,457 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/84d813dee8f8465c90dcb71129140f8c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/84d813dee8f8465c90dcb71129140f8c 2024-11-20T13:26:09,458 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/19a1fab2bb6e4c01a5b92c64bfda95b0 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/19a1fab2bb6e4c01a5b92c64bfda95b0 2024-11-20T13:26:09,459 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/2e3bc0e0f1a948fda62731ac8c2eb295 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/2e3bc0e0f1a948fda62731ac8c2eb295 2024-11-20T13:26:09,460 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/6af369bc751149c6b661052b2881ee58 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/6af369bc751149c6b661052b2881ee58 2024-11-20T13:26:09,461 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/817baf0ba65042648d32a84a32d6caeb to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/817baf0ba65042648d32a84a32d6caeb 2024-11-20T13:26:09,463 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/1ba25dcd0e1849a3b54d9ba0e908dafe to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/1ba25dcd0e1849a3b54d9ba0e908dafe 2024-11-20T13:26:09,464 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/5e501b8f8389463eb88875115144fbb3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/5e501b8f8389463eb88875115144fbb3 2024-11-20T13:26:09,465 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/0b9aadb13d9d427faf3af503b6a5c969 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/0b9aadb13d9d427faf3af503b6a5c969 2024-11-20T13:26:09,465 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/dff161644bce4059ae8ef91ca54cbc6b to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/dff161644bce4059ae8ef91ca54cbc6b 2024-11-20T13:26:09,466 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/0a3e68140293407c85478108332453a4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/0a3e68140293407c85478108332453a4 2024-11-20T13:26:09,467 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/3820e06aca9444fcacb345a916c3d91e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/3820e06aca9444fcacb345a916c3d91e 2024-11-20T13:26:09,468 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/6de00098c3024959a37c844fd3e1911f to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/6de00098c3024959a37c844fd3e1911f 2024-11-20T13:26:09,469 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/ee0927e1f8a342e4a0aafa731d180fc9 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/ee0927e1f8a342e4a0aafa731d180fc9 2024-11-20T13:26:09,470 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/3fd25629c25b4c30897d2c6906e8b375 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/3fd25629c25b4c30897d2c6906e8b375 2024-11-20T13:26:09,470 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/c36a79ddbe1a4618b1fdb04b8157d9bf to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/c36a79ddbe1a4618b1fdb04b8157d9bf 2024-11-20T13:26:09,471 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/4e3b57003a194f8bb589bdb8bbcaba48 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/4e3b57003a194f8bb589bdb8bbcaba48 2024-11-20T13:26:09,472 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/43215b4e9d1a4343b1ec318c3a004583 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/43215b4e9d1a4343b1ec318c3a004583 2024-11-20T13:26:09,473 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/0cd885729def44599be60f9fd70cdf0b to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/0cd885729def44599be60f9fd70cdf0b 2024-11-20T13:26:09,474 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/c573fd03521e4381bfe6c2462bd2181e to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/c573fd03521e4381bfe6c2462bd2181e 2024-11-20T13:26:09,475 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/abb68bad8792450eac515b6fe8e8dc51 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/abb68bad8792450eac515b6fe8e8dc51 2024-11-20T13:26:09,476 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/ade65092f23d4140bcf0b74e3669a658 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/ade65092f23d4140bcf0b74e3669a658 2024-11-20T13:26:09,477 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/37a918d1775643e0a71647a4070d0b3e to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/37a918d1775643e0a71647a4070d0b3e 2024-11-20T13:26:09,478 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/4602acc8c28e4e4b81d0ef1112a353bd to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/4602acc8c28e4e4b81d0ef1112a353bd 2024-11-20T13:26:09,479 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/37960c7b80774a3b802c1953fb94923a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/37960c7b80774a3b802c1953fb94923a 2024-11-20T13:26:09,479 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/396930f66c52439b8fc68634eed50786 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/396930f66c52439b8fc68634eed50786 2024-11-20T13:26:09,480 DEBUG [StoreCloser-TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/bbe60fb613fb48099a581c40db3326f1 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/bbe60fb613fb48099a581c40db3326f1 2024-11-20T13:26:09,496 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/recovered.edits/546.seqid, newMaxSeqId=546, maxSeqId=1 2024-11-20T13:26:09,497 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697. 
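The store-closer entries above show HBase's HFileArchiver moving every compacted store file of family C out of the region's data directory and into a parallel location under archive/ instead of deleting it. A minimal sketch of that path mapping follows; the class and method names are mine (the real logic lives in org.apache.hadoop.hbase.backup.HFileArchiver), and it only reproduces the data/... to archive/data/... rewrite visible in the "Archived from FileableStoreFile" entries.

```java
import org.apache.hadoop.fs.Path;

// Illustrative sketch only, not the real HFileArchiver. It mirrors the path
// rewrite visible in the log above: a store file under
//   <root>/data/<namespace>/<table>/<region>/<cf>/<hfile>
// ends up at the same relative location under <root>/archive/.
public final class ArchivePathSketch {

  static Path toArchivePath(Path rootDir, Path storeFile) {
    // Strip "<root>/" so the remainder still starts with "data/..."
    String relative = storeFile.toString()
        .substring(rootDir.toString().length() + 1);
    return new Path(new Path(rootDir, "archive"), relative);
  }
}
```

For example, .../data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/bbe60fb613fb48099a581c40db3326f1 maps to the .../archive/data/default/... path logged for that same file above.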
2024-11-20T13:26:09,497 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1635): Region close journal for 24db988c4fa8e1a0b1451e8c68b68697: 2024-11-20T13:26:09,498 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(170): Closed 24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:26:09,499 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=24db988c4fa8e1a0b1451e8c68b68697, regionState=CLOSED 2024-11-20T13:26:09,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-11-20T13:26:09,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; CloseRegionProcedure 24db988c4fa8e1a0b1451e8c68b68697, server=5ef453f0fbb6,46739,1732109006137 in 2.2050 sec 2024-11-20T13:26:09,502 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-11-20T13:26:09,502 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=24db988c4fa8e1a0b1451e8c68b68697, UNASSIGN in 2.2100 sec 2024-11-20T13:26:09,505 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-11-20T13:26:09,505 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.2120 sec 2024-11-20T13:26:09,506 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109169506"}]},"ts":"1732109169506"} 2024-11-20T13:26:09,507 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T13:26:09,509 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T13:26:09,510 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.2290 sec 2024-11-20T13:26:11,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-20T13:26:11,390 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 154 completed 2024-11-20T13:26:11,390 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T13:26:11,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=158, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:26:11,392 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=158, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:26:11,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-20T13:26:11,392 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=158, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:26:11,394 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:26:11,396 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A, FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B, FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C, FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/recovered.edits] 2024-11-20T13:26:11,399 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/3421051760d34f8996a64170b1f4112a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/3421051760d34f8996a64170b1f4112a 2024-11-20T13:26:11,400 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/9c47dadcb09d4bb2957ff65907ffcd45 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/9c47dadcb09d4bb2957ff65907ffcd45 2024-11-20T13:26:11,401 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/e33d7bdc7f57407c97e6128dbe9aa69c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/A/e33d7bdc7f57407c97e6128dbe9aa69c 2024-11-20T13:26:11,403 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/685c48fa401243a18a3ce7e01868831b to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/685c48fa401243a18a3ce7e01868831b 2024-11-20T13:26:11,404 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/d110fb2ec4ba48eeab5e180945f738b4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/d110fb2ec4ba48eeab5e180945f738b4 
2024-11-20T13:26:11,404 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/dab8f64a74984ab4bea2259d932c4fca to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/B/dab8f64a74984ab4bea2259d932c4fca 2024-11-20T13:26:11,406 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/49e064ff83e847628e725d1df44d4958 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/49e064ff83e847628e725d1df44d4958 2024-11-20T13:26:11,407 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/b7b9e013432f4c64bb5a0731a934084a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/b7b9e013432f4c64bb5a0731a934084a 2024-11-20T13:26:11,408 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/fb33887b4b3a4e558e7880aaf6345a5d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/C/fb33887b4b3a4e558e7880aaf6345a5d 2024-11-20T13:26:11,409 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/recovered.edits/546.seqid to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697/recovered.edits/546.seqid 2024-11-20T13:26:11,410 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/24db988c4fa8e1a0b1451e8c68b68697 2024-11-20T13:26:11,410 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T13:26:11,412 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=158, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:26:11,413 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T13:26:11,415 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
2024-11-20T13:26:11,416 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=158, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:26:11,416 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T13:26:11,416 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732109171416"}]},"ts":"9223372036854775807"} 2024-11-20T13:26:11,417 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T13:26:11,417 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 24db988c4fa8e1a0b1451e8c68b68697, NAME => 'TestAcidGuarantees,,1732109143243.24db988c4fa8e1a0b1451e8c68b68697.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T13:26:11,417 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T13:26:11,418 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732109171417"}]},"ts":"9223372036854775807"} 2024-11-20T13:26:11,419 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T13:26:11,421 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=158, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:26:11,421 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 31 msec 2024-11-20T13:26:11,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-20T13:26:11,493 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 158 completed 2024-11-20T13:26:11,505 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=239 (was 238) - Thread LEAK? -, OpenFileDescriptor=450 (was 451), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=742 (was 944), ProcessCount=11 (was 11), AvailableMemoryMB=396 (was 248) - AvailableMemoryMB LEAK? - 2024-11-20T13:26:11,514 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=239, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=742, ProcessCount=11, AvailableMemoryMB=395 2024-11-20T13:26:11,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
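The entries above complete the teardown of the previous test case: DisableTableProcedure (pid 154) and DeleteTableProcedure (pid 158) both finish, and the HBaseAdmin table futures report the DISABLE and DELETE operations as completed. From the client side this corresponds to the standard Admin calls sketched below; this is an assumption about what the test harness issues, not code taken from the test source.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the client-side disable/delete that produces the DISABLE and
// DELETE procedures seen in the log. Assumes a reachable cluster configuration.
public final class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        admin.disableTable(table);   // -> DisableTableProcedure (pid 154 above)
        admin.deleteTable(table);    // -> DeleteTableProcedure  (pid 158 above)
      }
    }
  }
}
```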
2024-11-20T13:26:11,520 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T13:26:11,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T13:26:11,521 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=159, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T13:26:11,522 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:11,522 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 159 2024-11-20T13:26:11,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T13:26:11,522 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=159, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T13:26:11,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742418_1594 (size=963) 2024-11-20T13:26:11,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T13:26:11,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T13:26:11,929 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc 2024-11-20T13:26:11,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742419_1595 (size=53) 2024-11-20T13:26:12,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T13:26:12,334 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:26:12,334 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing b7ff2ddfd4733260af7cbc9b7e7d2218, disabling compactions & flushes 2024-11-20T13:26:12,334 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:12,334 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:12,334 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. after waiting 0 ms 2024-11-20T13:26:12,334 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:12,334 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
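The CREATE sequence above (pid 159) builds TestAcidGuarantees with three column families A/B/C and the table attribute hbase.hregion.compacting.memstore.type=ADAPTIVE, which is what later makes each store open with a CompactingMemStore. A client-side equivalent is sketched below; it is a plausible reconstruction from the descriptor printed in the log, not the test's actual code.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: create the table with the attributes shown in the log
// (ADAPTIVE in-memory compaction, families A/B/C with VERSIONS => 1).
final class CreateTableSketch {
  static void createTable(Admin admin) throws java.io.IOException {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String family : new String[] {"A", "B", "C"}) {
      builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)
              .build());
    }
    admin.createTable(builder.build()); // -> CreateTableProcedure (pid 159 above)
  }
}
```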
2024-11-20T13:26:12,334 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:12,335 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=159, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T13:26:12,335 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732109172335"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732109172335"}]},"ts":"1732109172335"} 2024-11-20T13:26:12,336 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T13:26:12,337 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=159, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T13:26:12,337 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109172337"}]},"ts":"1732109172337"} 2024-11-20T13:26:12,338 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T13:26:12,341 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b7ff2ddfd4733260af7cbc9b7e7d2218, ASSIGN}] 2024-11-20T13:26:12,342 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b7ff2ddfd4733260af7cbc9b7e7d2218, ASSIGN 2024-11-20T13:26:12,342 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=b7ff2ddfd4733260af7cbc9b7e7d2218, ASSIGN; state=OFFLINE, location=5ef453f0fbb6,46739,1732109006137; forceNewPlan=false, retain=false 2024-11-20T13:26:12,493 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=b7ff2ddfd4733260af7cbc9b7e7d2218, regionState=OPENING, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:12,494 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE; OpenRegionProcedure b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137}] 2024-11-20T13:26:12,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T13:26:12,645 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:12,649 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:12,649 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7285): Opening region: {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} 2024-11-20T13:26:12,649 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:12,649 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:26:12,649 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7327): checking encryption for b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:12,649 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7330): checking classloading for b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:12,650 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:12,651 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:26:12,651 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b7ff2ddfd4733260af7cbc9b7e7d2218 columnFamilyName A 2024-11-20T13:26:12,652 DEBUG [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:12,652 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] regionserver.HStore(327): Store=b7ff2ddfd4733260af7cbc9b7e7d2218/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:26:12,652 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:12,653 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:26:12,653 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b7ff2ddfd4733260af7cbc9b7e7d2218 columnFamilyName B 2024-11-20T13:26:12,653 DEBUG [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:12,654 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] regionserver.HStore(327): Store=b7ff2ddfd4733260af7cbc9b7e7d2218/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:26:12,654 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:12,654 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:26:12,655 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b7ff2ddfd4733260af7cbc9b7e7d2218 columnFamilyName C 2024-11-20T13:26:12,655 DEBUG [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:12,655 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] regionserver.HStore(327): Store=b7ff2ddfd4733260af7cbc9b7e7d2218/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:26:12,655 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:12,656 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:12,656 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:12,657 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T13:26:12,658 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1085): writing seq id for b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:12,660 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T13:26:12,660 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1102): Opened b7ff2ddfd4733260af7cbc9b7e7d2218; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74778230, jitterRate=0.11428245902061462}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T13:26:12,661 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1001): Region open journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:12,662 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., pid=161, masterSystemTime=1732109172645 2024-11-20T13:26:12,663 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:12,663 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
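When the region opens, each StoreOpener line above reports memstore type=CompactingMemStore with compactor=ADAPTIVE, i.e. the table-level attribute has propagated to every family. For reference, the same policy can also be requested per column family through the public descriptor API, as in the hedged sketch below; this is an alternative to the table-wide attribute, not what this test uses.

```java
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: request ADAPTIVE in-memory compaction for a single family instead of
// setting hbase.hregion.compacting.memstore.type on the whole table.
final class AdaptiveFamilySketch {
  static ColumnFamilyDescriptor adaptiveFamily(String name) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .build();
  }
}
```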
2024-11-20T13:26:12,663 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=b7ff2ddfd4733260af7cbc9b7e7d2218, regionState=OPEN, openSeqNum=2, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:12,665 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=161, resume processing ppid=160 2024-11-20T13:26:12,665 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, ppid=160, state=SUCCESS; OpenRegionProcedure b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 in 170 msec 2024-11-20T13:26:12,666 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-20T13:26:12,666 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b7ff2ddfd4733260af7cbc9b7e7d2218, ASSIGN in 324 msec 2024-11-20T13:26:12,666 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=159, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T13:26:12,667 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109172667"}]},"ts":"1732109172667"} 2024-11-20T13:26:12,667 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T13:26:12,670 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=159, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T13:26:12,671 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1500 sec 2024-11-20T13:26:13,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T13:26:13,626 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-11-20T13:26:13,627 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x41799513 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@b8f7bf5 2024-11-20T13:26:13,633 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72ac0393, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:26:13,634 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:26:13,635 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32792, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:26:13,636 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T13:26:13,637 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41560, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T13:26:13,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T13:26:13,638 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T13:26:13,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=162, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T13:26:13,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742420_1596 (size=999) 2024-11-20T13:26:14,047 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-20T13:26:14,048 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-20T13:26:14,049 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T13:26:14,050 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b7ff2ddfd4733260af7cbc9b7e7d2218, REOPEN/MOVE}] 2024-11-20T13:26:14,051 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b7ff2ddfd4733260af7cbc9b7e7d2218, REOPEN/MOVE 2024-11-20T13:26:14,051 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=b7ff2ddfd4733260af7cbc9b7e7d2218, regionState=CLOSING, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,052 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T13:26:14,052 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=165, ppid=164, state=RUNNABLE; CloseRegionProcedure b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137}] 2024-11-20T13:26:14,203 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,204 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] handler.UnassignRegionHandler(124): Close b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:14,204 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T13:26:14,204 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1681): Closing b7ff2ddfd4733260af7cbc9b7e7d2218, disabling compactions & flushes 2024-11-20T13:26:14,204 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:14,204 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:14,204 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. after waiting 0 ms 2024-11-20T13:26:14,204 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
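The alter logged above (pid=162) rewrites family 'A' of TestAcidGuarantees as a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4') and then reopens the region via the ReopenTableRegions/TransitRegionState procedures. For reference, a minimal client-side sketch of that kind of modification with the HBase 2.x Admin API follows; it is illustrative only, assumes a plain Connection, and is not taken from the test's own setup code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyA {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Start from the current descriptor and rewrite family 'A' as a MOB family.
          TableDescriptor current = admin.getDescriptor(table);
          TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
              .modifyColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                  .setMobEnabled(true)   // IS_MOB => 'true' in the log
                  .setMobThreshold(4L)   // MOB_THRESHOLD => '4' in the log
                  .build())
              .build();
          // Like the logged ModifyTableProcedure, this reopens the table's regions.
          admin.modifyTable(modified);
        }
      }
    }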
2024-11-20T13:26:14,208 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T13:26:14,209 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:14,209 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1635): Region close journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:14,209 WARN [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegionServer(3786): Not adding moved region record: b7ff2ddfd4733260af7cbc9b7e7d2218 to self. 2024-11-20T13:26:14,210 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] handler.UnassignRegionHandler(170): Closed b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:14,211 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=b7ff2ddfd4733260af7cbc9b7e7d2218, regionState=CLOSED 2024-11-20T13:26:14,212 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=165, resume processing ppid=164 2024-11-20T13:26:14,213 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, ppid=164, state=SUCCESS; CloseRegionProcedure b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 in 159 msec 2024-11-20T13:26:14,213 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=b7ff2ddfd4733260af7cbc9b7e7d2218, REOPEN/MOVE; state=CLOSED, location=5ef453f0fbb6,46739,1732109006137; forceNewPlan=false, retain=true 2024-11-20T13:26:14,363 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=b7ff2ddfd4733260af7cbc9b7e7d2218, regionState=OPENING, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,364 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=164, state=RUNNABLE; OpenRegionProcedure b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137}] 2024-11-20T13:26:14,516 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,518 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:14,518 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7285): Opening region: {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} 2024-11-20T13:26:14,519 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:14,519 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T13:26:14,519 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7327): checking encryption for b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:14,519 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7330): checking classloading for b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:14,520 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:14,520 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:26:14,521 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b7ff2ddfd4733260af7cbc9b7e7d2218 columnFamilyName A 2024-11-20T13:26:14,522 DEBUG [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:14,522 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] regionserver.HStore(327): Store=b7ff2ddfd4733260af7cbc9b7e7d2218/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:26:14,522 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:14,523 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:26:14,523 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b7ff2ddfd4733260af7cbc9b7e7d2218 columnFamilyName B 2024-11-20T13:26:14,523 DEBUG [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:14,523 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] regionserver.HStore(327): Store=b7ff2ddfd4733260af7cbc9b7e7d2218/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:26:14,524 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:14,524 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T13:26:14,524 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b7ff2ddfd4733260af7cbc9b7e7d2218 columnFamilyName C 2024-11-20T13:26:14,524 DEBUG [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:14,525 INFO [StoreOpener-b7ff2ddfd4733260af7cbc9b7e7d2218-1 {}] regionserver.HStore(327): Store=b7ff2ddfd4733260af7cbc9b7e7d2218/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T13:26:14,525 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:14,525 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:14,526 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:14,527 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T13:26:14,528 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1085): writing seq id for b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:14,529 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1102): Opened b7ff2ddfd4733260af7cbc9b7e7d2218; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66863924, jitterRate=-0.003649890422821045}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T13:26:14,530 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1001): Region open journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:14,530 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., pid=166, masterSystemTime=1732109174516 2024-11-20T13:26:14,531 DEBUG [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:14,532 INFO [RS_OPEN_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
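The store openers above report CompactingMemStore with the ADAPTIVE compactor for families A, B and C, which follows from the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' carried in the descriptor. A minimal descriptor sketch of the equivalent per-family setting follows; it is an assumption-laden illustration, not the test's actual table definition, and the tiny 131072-byte flush size only mirrors the MEMSTORE_FLUSHSIZE warning logged earlier.

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class AdaptiveMemstoreDescriptor {
      static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // The log sets the ADAPTIVE policy at table level via
            // 'hbase.hregion.compacting.memstore.type'; the per-family equivalent is shown here.
            // Each store then opens as a CompactingMemStore with the ADAPTIVE compactor.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .setMaxVersions(1)       // VERSIONS => '1' in the logged descriptor
                .build())
            // 131072 bytes mirrors the MEMSTORE_FLUSHSIZE warning above; it is far below the
            // default and only makes sense in a test that wants very frequent flushes.
            .setMemStoreFlushSize(131072L)
            .build();
      }
    }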
2024-11-20T13:26:14,532 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=b7ff2ddfd4733260af7cbc9b7e7d2218, regionState=OPEN, openSeqNum=5, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,534 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=164 2024-11-20T13:26:14,534 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=164, state=SUCCESS; OpenRegionProcedure b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 in 169 msec 2024-11-20T13:26:14,535 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-20T13:26:14,535 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b7ff2ddfd4733260af7cbc9b7e7d2218, REOPEN/MOVE in 484 msec 2024-11-20T13:26:14,536 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=163, resume processing ppid=162 2024-11-20T13:26:14,536 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, ppid=162, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 486 msec 2024-11-20T13:26:14,538 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 899 msec 2024-11-20T13:26:14,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-11-20T13:26:14,539 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x71d2b4d3 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17229cf 2024-11-20T13:26:14,543 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13d002e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:26:14,544 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x787b683a to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68d70aab 2024-11-20T13:26:14,547 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b839a12, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:26:14,547 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1dae921d to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4d7c5151 2024-11-20T13:26:14,550 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a4811e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:26:14,551 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x459417c6 
to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6a4d20fc 2024-11-20T13:26:14,557 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60db72, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:26:14,557 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53eb3c7c to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5022bc3d 2024-11-20T13:26:14,561 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@569f5600, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:26:14,561 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e926f6c to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@229117e8 2024-11-20T13:26:14,564 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5218ef5a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:26:14,565 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x239fc289 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@91baf45 2024-11-20T13:26:14,571 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cecf303, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:26:14,572 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x26af975e to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@721a6549 2024-11-20T13:26:14,582 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@995f4e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:26:14,583 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x645da910 to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@45ccb8ff 2024-11-20T13:26:14,586 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63d87640, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:26:14,586 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x4299ae8d to 127.0.0.1:53074 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6232ed61 2024-11-20T13:26:14,594 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4278ed11, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T13:26:14,604 DEBUG [hconnection-0x3c870473-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:26:14,605 DEBUG [hconnection-0x48028a80-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:26:14,606 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32808, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:26:14,606 DEBUG [hconnection-0x1831a06d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:26:14,607 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32818, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:26:14,607 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32832, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:26:14,608 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:26:14,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-11-20T13:26:14,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T13:26:14,610 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:26:14,611 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:26:14,611 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:26:14,614 DEBUG [hconnection-0x236dfb28-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:26:14,614 DEBUG [hconnection-0x453706-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:26:14,614 DEBUG [hconnection-0x792f123d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-11-20T13:26:14,614 DEBUG [hconnection-0x41468693-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:26:14,615 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32842, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:26:14,615 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32850, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:26:14,615 DEBUG [hconnection-0xe6c8d28-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:26:14,615 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32872, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:26:14,616 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32848, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:26:14,618 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32874, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:26:14,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:14,618 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7ff2ddfd4733260af7cbc9b7e7d2218 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T13:26:14,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=A 2024-11-20T13:26:14,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:14,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=B 2024-11-20T13:26:14,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:14,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=C 2024-11-20T13:26:14,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:14,638 DEBUG [hconnection-0xbac3c93-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:26:14,639 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32884, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:26:14,643 DEBUG [hconnection-0x711a78f8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T13:26:14,644 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32898, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T13:26:14,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:14,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32872 deadline: 1732109234641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:14,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109234642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:14,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32818 deadline: 1732109234644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:14,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732109234644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:14,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32898 deadline: 1732109234647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,691 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cdd4aedef1c14b1dba2ed87c76288192_b7ff2ddfd4733260af7cbc9b7e7d2218 is 50, key is test_row_0/A:col10/1732109174612/Put/seqid=0 2024-11-20T13:26:14,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T13:26:14,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742421_1597 (size=12154) 2024-11-20T13:26:14,718 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:14,724 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cdd4aedef1c14b1dba2ed87c76288192_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cdd4aedef1c14b1dba2ed87c76288192_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:14,726 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/212bc107bff14154b328eaa3be9d6064, store: [table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:14,727 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/212bc107bff14154b328eaa3be9d6064 is 175, key is test_row_0/A:col10/1732109174612/Put/seqid=0 2024-11-20T13:26:14,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742422_1598 (size=30955) 2024-11-20T13:26:14,733 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/212bc107bff14154b328eaa3be9d6064 2024-11-20T13:26:14,748 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:14,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:14,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32872 deadline: 1732109234748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32818 deadline: 1732109234748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:14,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732109234750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:14,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32898 deadline: 1732109234750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:14,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109234756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,763 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T13:26:14,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:14,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:14,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:14,764 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
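The WARN/DEBUG pairs above show writers being rejected with RegionTooBusyException ("Over memstore limit=512.0 K") while the region is mid-flush, and the FlushRegionProcedure itself backing off because the region is "already flushing". A hedged sketch of an explicit client-side backoff around a put follows; in practice the stock client retries these failures on its own (and may surface them wrapped in its retries-exhausted exception), so the loop is only to make the behaviour visible. Row and column names are assumptions mirroring the logged keys.

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class BackoffPut {
      static void putWithBackoff(Connection conn) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 50;
          for (int attempt = 0; attempt < 10; attempt++) {
            try {
              table.put(put);
              return;                    // accepted once the memstore drains below its limit
            } catch (RegionTooBusyException e) {
              // Region over its memstore limit: wait for the in-flight flush, then retry.
              // Depending on client retry settings this exception may instead arrive wrapped.
              Thread.sleep(backoffMs);
              backoffMs = Math.min(backoffMs * 2, 2_000);
            }
          }
          throw new java.io.IOException("region stayed too busy after retries");
        }
      }
    }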
2024-11-20T13:26:14,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:14,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:14,771 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/1fefda6b3b1f4a339eab862c44d6af83 is 50, key is test_row_0/B:col10/1732109174612/Put/seqid=0 2024-11-20T13:26:14,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742423_1599 (size=12001) 2024-11-20T13:26:14,783 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/1fefda6b3b1f4a339eab862c44d6af83 2024-11-20T13:26:14,824 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/7dc34990f10f4981b3f34bd78a44efb7 is 50, key is test_row_0/C:col10/1732109174612/Put/seqid=0 2024-11-20T13:26:14,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742424_1600 (size=12001) 2024-11-20T13:26:14,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T13:26:14,917 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,918 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T13:26:14,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:14,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:14,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:14,918 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:14,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:14,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:14,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:14,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32818 deadline: 1732109234949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:14,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32872 deadline: 1732109234950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:14,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32898 deadline: 1732109234951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:14,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732109234953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:14,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:14,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109234959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:15,015 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T13:26:15,071 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:15,071 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T13:26:15,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:15,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:15,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:15,072 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:15,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:15,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:15,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T13:26:15,225 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:15,225 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T13:26:15,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:15,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:15,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:15,226 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:15,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:15,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:15,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:15,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:15,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32818 deadline: 1732109235254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:15,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32872 deadline: 1732109235254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:15,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:15,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32898 deadline: 1732109235254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:15,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:15,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732109235255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:15,258 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/7dc34990f10f4981b3f34bd78a44efb7 2024-11-20T13:26:15,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:15,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109235264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:15,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/212bc107bff14154b328eaa3be9d6064 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/212bc107bff14154b328eaa3be9d6064 2024-11-20T13:26:15,275 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/212bc107bff14154b328eaa3be9d6064, entries=150, sequenceid=15, filesize=30.2 K 2024-11-20T13:26:15,276 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/1fefda6b3b1f4a339eab862c44d6af83 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/1fefda6b3b1f4a339eab862c44d6af83 2024-11-20T13:26:15,283 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/1fefda6b3b1f4a339eab862c44d6af83, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T13:26:15,284 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/7dc34990f10f4981b3f34bd78a44efb7 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/7dc34990f10f4981b3f34bd78a44efb7 2024-11-20T13:26:15,288 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/7dc34990f10f4981b3f34bd78a44efb7, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T13:26:15,289 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for b7ff2ddfd4733260af7cbc9b7e7d2218 in 671ms, sequenceid=15, compaction requested=false 2024-11-20T13:26:15,289 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T13:26:15,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:15,383 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:15,383 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T13:26:15,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
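The 512.0 K figure in the RegionTooBusyException messages is the region's blocking threshold: writes are refused once the memstore exceeds the configured flush size multiplied by the block multiplier. A small configuration sketch follows; the 128 KB flush size is an assumption chosen only because it yields the 512 K limit seen here with the default multiplier of 4 (the test presumably shrinks the flush size from its 128 MB default to force this behaviour).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    // Server-side settings: in a test they would be placed on the Configuration
    // handed to the mini-cluster rather than on a client connection.
    Configuration conf = HBaseConfiguration.create();

    // Flush a region's memstore once it reaches 128 KB (the production default is
    // 128 MB; a tiny value like this forces frequent flushes). Assumed value.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

    // Refuse new writes once the memstore grows past flush.size * multiplier:
    // 128 KB * 4 = 512 KB, which is the "Over memstore limit=512.0 K" seen above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
  }
}

Raising either setting, or flushing sooner, moves the point at which the server starts rejecting writes.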
2024-11-20T13:26:15,384 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing b7ff2ddfd4733260af7cbc9b7e7d2218 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T13:26:15,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=A 2024-11-20T13:26:15,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:15,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=B 2024-11-20T13:26:15,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:15,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=C 2024-11-20T13:26:15,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:15,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e228bd12646648f6839c2eafce555c02_b7ff2ddfd4733260af7cbc9b7e7d2218 is 50, key is test_row_0/A:col10/1732109174642/Put/seqid=0 2024-11-20T13:26:15,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742425_1601 (size=12154) 2024-11-20T13:26:15,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:15,410 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e228bd12646648f6839c2eafce555c02_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e228bd12646648f6839c2eafce555c02_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:15,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/0e8e60c701ae4f9dbabee68d43c4eb7a, store: [table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:15,411 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/0e8e60c701ae4f9dbabee68d43c4eb7a is 175, key is test_row_0/A:col10/1732109174642/Put/seqid=0 2024-11-20T13:26:15,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742426_1602 (size=30955) 2024-11-20T13:26:15,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T13:26:15,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:15,758 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:15,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:15,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32898 deadline: 1732109235762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:15,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:15,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32818 deadline: 1732109235763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:15,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:15,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732109235764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:15,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:15,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32872 deadline: 1732109235765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:15,772 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:15,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109235771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:15,818 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/0e8e60c701ae4f9dbabee68d43c4eb7a 2024-11-20T13:26:15,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/0fa1519ca8f340b0bc2f03d34db909a3 is 50, key is test_row_0/B:col10/1732109174642/Put/seqid=0 2024-11-20T13:26:15,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742427_1603 (size=12001) 2024-11-20T13:26:15,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:15,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32818 deadline: 1732109235866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:15,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:15,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32898 deadline: 1732109235866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:15,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:15,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32872 deadline: 1732109235869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:15,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:15,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732109235869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:16,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:16,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32898 deadline: 1732109236069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:16,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:16,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32818 deadline: 1732109236069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:16,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:16,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32872 deadline: 1732109236071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:16,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:16,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732109236072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:16,232 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/0fa1519ca8f340b0bc2f03d34db909a3 2024-11-20T13:26:16,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/d792969311724638bf1edeb84404d2f0 is 50, key is test_row_0/C:col10/1732109174642/Put/seqid=0 2024-11-20T13:26:16,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742428_1604 (size=12001) 2024-11-20T13:26:16,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:16,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32898 deadline: 1732109236371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:16,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:16,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32872 deadline: 1732109236374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:16,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:16,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32818 deadline: 1732109236375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:16,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:16,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732109236377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:16,657 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/d792969311724638bf1edeb84404d2f0 2024-11-20T13:26:16,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/0e8e60c701ae4f9dbabee68d43c4eb7a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/0e8e60c701ae4f9dbabee68d43c4eb7a 2024-11-20T13:26:16,667 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/0e8e60c701ae4f9dbabee68d43c4eb7a, entries=150, sequenceid=41, filesize=30.2 K 2024-11-20T13:26:16,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/0fa1519ca8f340b0bc2f03d34db909a3 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/0fa1519ca8f340b0bc2f03d34db909a3 2024-11-20T13:26:16,672 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/0fa1519ca8f340b0bc2f03d34db909a3, entries=150, sequenceid=41, filesize=11.7 K 2024-11-20T13:26:16,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/d792969311724638bf1edeb84404d2f0 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/d792969311724638bf1edeb84404d2f0 2024-11-20T13:26:16,680 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/d792969311724638bf1edeb84404d2f0, entries=150, sequenceid=41, filesize=11.7 K 2024-11-20T13:26:16,681 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for b7ff2ddfd4733260af7cbc9b7e7d2218 in 1297ms, sequenceid=41, compaction requested=false 2024-11-20T13:26:16,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:16,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:16,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-11-20T13:26:16,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-11-20T13:26:16,684 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-20T13:26:16,684 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0710 sec 2024-11-20T13:26:16,685 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 2.0760 sec 2024-11-20T13:26:16,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T13:26:16,715 INFO [Thread-2638 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-20T13:26:16,717 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:26:16,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-11-20T13:26:16,718 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
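[editor's note] The records above show the test's flush request reaching the master (HMaster$22: Client=jenkins flush TestAcidGuarantees), procId 167 reported complete, and procedure 169 just stored. For reference, a minimal sketch of how such a flush is issued through the public Admin API; the class name and connection setup are assumptions for illustration and are not taken from the test harness.

    // Hedged sketch, not part of the test: issuing the table flush that the master
    // records above as a FlushTableProcedure. Connection setup is assumed.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // The caller waits on the master-side procedure, matching the
          // "Operation: FLUSH ... procId: 167 completed" record above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

While the flush procedures run, the region can still reject incoming writes, which is what the surrounding RegionTooBusyException records show. [end editor's note]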
2024-11-20T13:26:16,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T13:26:16,719 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:26:16,719 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:26:16,781 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7ff2ddfd4733260af7cbc9b7e7d2218 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T13:26:16,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=A 2024-11-20T13:26:16,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:16,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=B 2024-11-20T13:26:16,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:16,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=C 2024-11-20T13:26:16,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:16,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:16,793 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f6c5b5447c9248539b20c58fa4fc8cad_b7ff2ddfd4733260af7cbc9b7e7d2218 is 50, key is test_row_0/A:col10/1732109175761/Put/seqid=0 2024-11-20T13:26:16,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742429_1605 (size=12154) 2024-11-20T13:26:16,804 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:16,810 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f6c5b5447c9248539b20c58fa4fc8cad_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f6c5b5447c9248539b20c58fa4fc8cad_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:16,811 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/cb33cf42a50240eaa2e227c098479e3d, store: 
[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:16,814 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/cb33cf42a50240eaa2e227c098479e3d is 175, key is test_row_0/A:col10/1732109175761/Put/seqid=0 2024-11-20T13:26:16,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T13:26:16,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742430_1606 (size=30955) 2024-11-20T13:26:16,843 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/cb33cf42a50240eaa2e227c098479e3d 2024-11-20T13:26:16,852 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/0d700bf0064d4643878ad32bd8be5fb4 is 50, key is test_row_0/B:col10/1732109175761/Put/seqid=0 2024-11-20T13:26:16,871 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:16,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T13:26:16,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:16,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:16,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:16,872 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:16,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:16,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:16,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109236873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:16,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
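[editor's note] The repeated "Over memstore limit=512.0 K" warnings around this point come from HRegion.checkResources(), which blocks writes once a region's memstore exceeds its blocking size. A hedged sketch of where that ceiling comes from and a simple client-side backoff follows; all concrete values are assumptions (the test's actual configuration is not visible in this log), and the stock HBase client already retries such failures on its own per hbase.client.retries.number.

    // Hedged sketch, not part of the test: the arithmetic behind the 512 K blocking
    // limit, plus an explicit write backoff. Values are assumed for illustration.
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MemstoreBackoffExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();

        // Writes are rejected once the region's memstore exceeds
        // hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier.
        // A 512 K limit like the one above could arise from test-sized values such as
        // 128 K * 4 (assumed; these are server-side settings, normally in
        // hbase-site.xml or the table descriptor, not client code).
        long flushSize  = 128L * 1024;
        long multiplier = 4;
        System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);              // the client also retries internally
              break;                       // write accepted
            } catch (IOException e) {
              // If the region is blocked, RegionTooBusyException appears in the
              // cause chain once client retries are exhausted; back off and retry.
              Thread.sleep(200L << attempt);
            }
          }
        }
      }
    }

[end editor's note]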
2024-11-20T13:26:16,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:16,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32872 deadline: 1732109236877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:16,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:16,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32898 deadline: 1732109236877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:16,882 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:16,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32818 deadline: 1732109236880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:16,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:16,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732109236884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:16,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742431_1607 (size=12001) 2024-11-20T13:26:16,887 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/0d700bf0064d4643878ad32bd8be5fb4 2024-11-20T13:26:16,897 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/3a8475deab2b4af984a00e1ccd17ceb4 is 50, key is test_row_0/C:col10/1732109175761/Put/seqid=0 2024-11-20T13:26:16,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742432_1608 (size=12001) 2024-11-20T13:26:16,903 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/3a8475deab2b4af984a00e1ccd17ceb4 2024-11-20T13:26:16,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/cb33cf42a50240eaa2e227c098479e3d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/cb33cf42a50240eaa2e227c098479e3d 2024-11-20T13:26:16,912 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/cb33cf42a50240eaa2e227c098479e3d, entries=150, 
sequenceid=53, filesize=30.2 K 2024-11-20T13:26:16,913 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/0d700bf0064d4643878ad32bd8be5fb4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/0d700bf0064d4643878ad32bd8be5fb4 2024-11-20T13:26:16,918 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/0d700bf0064d4643878ad32bd8be5fb4, entries=150, sequenceid=53, filesize=11.7 K 2024-11-20T13:26:16,918 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/3a8475deab2b4af984a00e1ccd17ceb4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/3a8475deab2b4af984a00e1ccd17ceb4 2024-11-20T13:26:16,922 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/3a8475deab2b4af984a00e1ccd17ceb4, entries=150, sequenceid=53, filesize=11.7 K 2024-11-20T13:26:16,923 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for b7ff2ddfd4733260af7cbc9b7e7d2218 in 142ms, sequenceid=53, compaction requested=true 2024-11-20T13:26:16,923 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:16,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7ff2ddfd4733260af7cbc9b7e7d2218:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:26:16,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:16,923 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:16,923 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:16,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7ff2ddfd4733260af7cbc9b7e7d2218:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:26:16,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:16,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7ff2ddfd4733260af7cbc9b7e7d2218:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:26:16,923 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:26:16,924 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:16,924 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/A is initiating minor compaction (all files) 2024-11-20T13:26:16,924 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:16,924 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/A in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:16,924 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/B is initiating minor compaction (all files) 2024-11-20T13:26:16,924 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/B in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:16,924 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/212bc107bff14154b328eaa3be9d6064, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/0e8e60c701ae4f9dbabee68d43c4eb7a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/cb33cf42a50240eaa2e227c098479e3d] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=90.7 K 2024-11-20T13:26:16,924 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/1fefda6b3b1f4a339eab862c44d6af83, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/0fa1519ca8f340b0bc2f03d34db909a3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/0d700bf0064d4643878ad32bd8be5fb4] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=35.2 K 2024-11-20T13:26:16,924 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] 
table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:16,924 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/212bc107bff14154b328eaa3be9d6064, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/0e8e60c701ae4f9dbabee68d43c4eb7a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/cb33cf42a50240eaa2e227c098479e3d] 2024-11-20T13:26:16,925 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 212bc107bff14154b328eaa3be9d6064, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732109174612 2024-11-20T13:26:16,925 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 1fefda6b3b1f4a339eab862c44d6af83, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732109174612 2024-11-20T13:26:16,926 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e8e60c701ae4f9dbabee68d43c4eb7a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732109174641 2024-11-20T13:26:16,926 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 0fa1519ca8f340b0bc2f03d34db909a3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732109174641 2024-11-20T13:26:16,927 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb33cf42a50240eaa2e227c098479e3d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732109175761 2024-11-20T13:26:16,927 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d700bf0064d4643878ad32bd8be5fb4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732109175761 2024-11-20T13:26:16,934 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:16,937 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112004b84426cd3a43e0a55ad09ec872bbfd_b7ff2ddfd4733260af7cbc9b7e7d2218 store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:16,938 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7ff2ddfd4733260af7cbc9b7e7d2218#B#compaction#520 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:16,939 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/09ec13b851d648c3a95966f7d847f0f8 is 50, key is test_row_0/B:col10/1732109175761/Put/seqid=0 2024-11-20T13:26:16,939 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112004b84426cd3a43e0a55ad09ec872bbfd_b7ff2ddfd4733260af7cbc9b7e7d2218, store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:16,939 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112004b84426cd3a43e0a55ad09ec872bbfd_b7ff2ddfd4733260af7cbc9b7e7d2218 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:16,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742433_1609 (size=4469) 2024-11-20T13:26:16,952 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7ff2ddfd4733260af7cbc9b7e7d2218#A#compaction#519 average throughput is 1.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:16,953 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/42bc632d913543d487211e94e14c8dda is 175, key is test_row_0/A:col10/1732109175761/Put/seqid=0 2024-11-20T13:26:16,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742434_1610 (size=12104) 2024-11-20T13:26:16,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742435_1611 (size=31058) 2024-11-20T13:26:16,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:16,980 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7ff2ddfd4733260af7cbc9b7e7d2218 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T13:26:16,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=A 2024-11-20T13:26:16,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:16,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=B 2024-11-20T13:26:16,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:16,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
b7ff2ddfd4733260af7cbc9b7e7d2218, store=C 2024-11-20T13:26:16,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:16,995 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e2c765f6b89f4bcf9d47ba8004c0d449_b7ff2ddfd4733260af7cbc9b7e7d2218 is 50, key is test_row_0/A:col10/1732109176978/Put/seqid=0 2024-11-20T13:26:16,997 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/42bc632d913543d487211e94e14c8dda as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/42bc632d913543d487211e94e14c8dda 2024-11-20T13:26:17,002 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/A of b7ff2ddfd4733260af7cbc9b7e7d2218 into 42bc632d913543d487211e94e14c8dda(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:26:17,002 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:17,002 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/A, priority=13, startTime=1732109176923; duration=0sec 2024-11-20T13:26:17,002 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:26:17,002 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:A 2024-11-20T13:26:17,003 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:17,003 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:17,003 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/C is initiating minor compaction (all files) 2024-11-20T13:26:17,003 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/C in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:17,004 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/7dc34990f10f4981b3f34bd78a44efb7, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/d792969311724638bf1edeb84404d2f0, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/3a8475deab2b4af984a00e1ccd17ceb4] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=35.2 K 2024-11-20T13:26:17,004 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7dc34990f10f4981b3f34bd78a44efb7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732109174612 2024-11-20T13:26:17,005 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting d792969311724638bf1edeb84404d2f0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732109174641 2024-11-20T13:26:17,005 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a8475deab2b4af984a00e1ccd17ceb4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732109175761 2024-11-20T13:26:17,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742436_1612 (size=12154) 2024-11-20T13:26:17,013 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7ff2ddfd4733260af7cbc9b7e7d2218#C#compaction#522 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:17,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:17,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109237013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:17,013 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/b5607c7f31e042ad9a5f78e3742e4fc9 is 50, key is test_row_0/C:col10/1732109175761/Put/seqid=0 2024-11-20T13:26:17,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742437_1613 (size=12104) 2024-11-20T13:26:17,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T13:26:17,024 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/b5607c7f31e042ad9a5f78e3742e4fc9 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/b5607c7f31e042ad9a5f78e3742e4fc9 2024-11-20T13:26:17,028 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:17,029 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T13:26:17,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:17,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:17,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:17,029 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:17,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:17,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:17,031 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/C of b7ff2ddfd4733260af7cbc9b7e7d2218 into b5607c7f31e042ad9a5f78e3742e4fc9(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:26:17,031 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:17,031 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/C, priority=13, startTime=1732109176923; duration=0sec 2024-11-20T13:26:17,031 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:17,031 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:C 2024-11-20T13:26:17,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:17,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109237114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:17,181 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:17,182 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T13:26:17,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:17,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:17,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:17,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:17,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:17,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:17,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:17,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109237316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:17,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T13:26:17,334 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:17,334 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T13:26:17,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:17,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:17,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:17,335 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:17,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:17,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:17,372 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/09ec13b851d648c3a95966f7d847f0f8 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/09ec13b851d648c3a95966f7d847f0f8 2024-11-20T13:26:17,376 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/B of b7ff2ddfd4733260af7cbc9b7e7d2218 into 09ec13b851d648c3a95966f7d847f0f8(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:26:17,376 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:17,377 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/B, priority=13, startTime=1732109176923; duration=0sec 2024-11-20T13:26:17,377 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:17,377 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:B 2024-11-20T13:26:17,407 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:17,411 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e2c765f6b89f4bcf9d47ba8004c0d449_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e2c765f6b89f4bcf9d47ba8004c0d449_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:17,412 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/b0c3265b374a4d929c65a0dfe290166f, store: [table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:17,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/b0c3265b374a4d929c65a0dfe290166f is 175, key is test_row_0/A:col10/1732109176978/Put/seqid=0 2024-11-20T13:26:17,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742438_1614 (size=30955) 2024-11-20T13:26:17,487 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:17,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T13:26:17,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:17,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
as already flushing 2024-11-20T13:26:17,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:17,488 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:17,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:17,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:17,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:17,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109237620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:17,640 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:17,640 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T13:26:17,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:17,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:17,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:17,641 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:17,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:17,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:17,794 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:17,794 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T13:26:17,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:17,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:17,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:17,794 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:17,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:17,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:17,818 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/b0c3265b374a4d929c65a0dfe290166f 2024-11-20T13:26:17,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T13:26:17,826 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/c38c99d20df44843a7f7f0e2417679a8 is 50, key is test_row_0/B:col10/1732109176978/Put/seqid=0 2024-11-20T13:26:17,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742439_1615 (size=12001) 2024-11-20T13:26:17,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:17,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32872 deadline: 1732109237882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:17,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:17,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32898 deadline: 1732109237886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:17,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:17,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32818 deadline: 1732109237886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:17,889 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:17,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732109237888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:17,947 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:17,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T13:26:17,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:17,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:17,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:17,947 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:17,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:17,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:18,100 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:18,101 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T13:26:18,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:18,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:18,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:18,101 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:18,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:18,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:18,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:18,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109238124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:18,237 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/c38c99d20df44843a7f7f0e2417679a8 2024-11-20T13:26:18,247 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/494109b7d42f451abe2ae634bb21d9ea is 50, key is test_row_0/C:col10/1732109176978/Put/seqid=0 2024-11-20T13:26:18,253 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:18,254 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T13:26:18,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:18,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:18,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:18,254 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:18,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:18,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:18,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742440_1616 (size=12001) 2024-11-20T13:26:18,423 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:18,423 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T13:26:18,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:18,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:18,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:18,423 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:18,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:18,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:18,575 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:18,576 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T13:26:18,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:18,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:18,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:18,576 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:18,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:18,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:18,662 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/494109b7d42f451abe2ae634bb21d9ea 2024-11-20T13:26:18,669 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/b0c3265b374a4d929c65a0dfe290166f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/b0c3265b374a4d929c65a0dfe290166f 2024-11-20T13:26:18,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/b0c3265b374a4d929c65a0dfe290166f, entries=150, sequenceid=78, filesize=30.2 K 2024-11-20T13:26:18,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/c38c99d20df44843a7f7f0e2417679a8 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/c38c99d20df44843a7f7f0e2417679a8 2024-11-20T13:26:18,679 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/c38c99d20df44843a7f7f0e2417679a8, entries=150, sequenceid=78, 
filesize=11.7 K 2024-11-20T13:26:18,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/494109b7d42f451abe2ae634bb21d9ea as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/494109b7d42f451abe2ae634bb21d9ea 2024-11-20T13:26:18,686 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/494109b7d42f451abe2ae634bb21d9ea, entries=150, sequenceid=78, filesize=11.7 K 2024-11-20T13:26:18,686 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for b7ff2ddfd4733260af7cbc9b7e7d2218 in 1706ms, sequenceid=78, compaction requested=false 2024-11-20T13:26:18,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:18,728 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:18,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T13:26:18,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:18,729 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing b7ff2ddfd4733260af7cbc9b7e7d2218 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T13:26:18,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=A 2024-11-20T13:26:18,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:18,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=B 2024-11-20T13:26:18,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:18,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=C 2024-11-20T13:26:18,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:18,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200b47572ae3434e05be0acb873179e19e_b7ff2ddfd4733260af7cbc9b7e7d2218 is 50, key is test_row_0/A:col10/1732109177008/Put/seqid=0 2024-11-20T13:26:18,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742441_1617 (size=12154) 2024-11-20T13:26:18,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,754 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200b47572ae3434e05be0acb873179e19e_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200b47572ae3434e05be0acb873179e19e_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:18,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/1992484778384fd1bb6a2e46c0855600, store: [table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:18,755 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/1992484778384fd1bb6a2e46c0855600 is 175, key is test_row_0/A:col10/1732109177008/Put/seqid=0 2024-11-20T13:26:18,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742442_1618 (size=30955) 2024-11-20T13:26:18,761 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=92, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/1992484778384fd1bb6a2e46c0855600 2024-11-20T13:26:18,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/268b90b67dc149eb8d746ccad9beabcb is 50, key is test_row_0/B:col10/1732109177008/Put/seqid=0 2024-11-20T13:26:18,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742443_1619 (size=12001) 2024-11-20T13:26:18,773 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/268b90b67dc149eb8d746ccad9beabcb 2024-11-20T13:26:18,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/a596fbb7e2654d9289065b947f21e43a is 50, key is test_row_0/C:col10/1732109177008/Put/seqid=0 2024-11-20T13:26:18,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742444_1620 (size=12001) 2024-11-20T13:26:18,791 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/a596fbb7e2654d9289065b947f21e43a 2024-11-20T13:26:18,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/1992484778384fd1bb6a2e46c0855600 as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/1992484778384fd1bb6a2e46c0855600 2024-11-20T13:26:18,803 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/1992484778384fd1bb6a2e46c0855600, entries=150, sequenceid=92, filesize=30.2 K 2024-11-20T13:26:18,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/268b90b67dc149eb8d746ccad9beabcb as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/268b90b67dc149eb8d746ccad9beabcb 2024-11-20T13:26:18,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,809 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/268b90b67dc149eb8d746ccad9beabcb, entries=150, sequenceid=92, filesize=11.7 K 2024-11-20T13:26:18,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/a596fbb7e2654d9289065b947f21e43a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/a596fbb7e2654d9289065b947f21e43a 2024-11-20T13:26:18,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,817 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/a596fbb7e2654d9289065b947f21e43a, entries=150, sequenceid=92, filesize=11.7 K 2024-11-20T13:26:18,818 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for b7ff2ddfd4733260af7cbc9b7e7d2218 in 89ms, sequenceid=92, compaction requested=true 2024-11-20T13:26:18,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:18,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:18,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-11-20T13:26:18,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-11-20T13:26:18,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,821 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-20T13:26:18,821 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1010 sec 2024-11-20T13:26:18,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,823 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 2.1050 sec 2024-11-20T13:26:18,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T13:26:18,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,824 INFO [Thread-2638 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-20T13:26:18,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,825 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:26:18,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-11-20T13:26:18,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,827 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:26:18,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T13:26:18,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,828 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:26:18,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,828 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:26:18,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[repeated DEBUG entries elided: storefiletracker.StoreFileTrackerFactory(122) logged "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" continuously from RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 on port 46739 between 2024-11-20T13:26:18,853 and 2024-11-20T13:26:18,914]
2024-11-20T13:26:18,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T13:26:18,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T13:26:18,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,980 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:18,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-20T13:26:18,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:18,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:18,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:18,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-11-20T13:26:18,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-11-20T13:26:18,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,985 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-20T13:26:18,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,986 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 156 msec 2024-11-20T13:26:18,986 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,987 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 161 msec 2024-11-20T13:26:18,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,988 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,991 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,994 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,997 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,999 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:18,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,002 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,003 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,006 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,008 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,010 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,012 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,015 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,017 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,019 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,022 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,025 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,027 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,029 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,032 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,035 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,037 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,095 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,098 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,102 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,105 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,109 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,112 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,115 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,118 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,121 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,123 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,126 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,129 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T13:26:19,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,131 INFO [Thread-2638 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-11-20T13:26:19,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,134 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:26:19,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,135 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T13:26:19,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees
2024-11-20T13:26:19,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173
2024-11-20T13:26:19,137 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-20T13:26:19,140 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T13:26:19,140 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
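The entries above record a table flush being driven through the master's procedure framework: pid=173 is the FlushTableProcedure for TestAcidGuarantees and pid=174 is its FlushRegionProcedure subprocedure. The test code that requested this flush is not part of the log; purely as a hedged illustration, a client would typically request such a flush through the HBase Admin API along the following lines (only the table name is taken from the log; the class name and connection setup are assumed):

// Minimal sketch, not taken from the test source.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestAcidGuarantees {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // picks up hbase-site.xml from the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Submits a flush request for the table; in the log above the master
      // executes it as FlushTableProcedure pid=173 with FlushRegionProcedure pid=174.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}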
2024-11-20T13:26:19,197 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,201 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,203 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,205 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,207 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,209 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,213 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,220 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:19,227 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7ff2ddfd4733260af7cbc9b7e7d2218 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T13:26:19,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=A 2024-11-20T13:26:19,227 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:19,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=B 2024-11-20T13:26:19,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:19,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=C 2024-11-20T13:26:19,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:19,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209395889b4fbb4abf87643fc12a628cce_b7ff2ddfd4733260af7cbc9b7e7d2218 is 50, key is test_row_0/A:col10/1732109179212/Put/seqid=0 2024-11-20T13:26:19,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T13:26:19,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742445_1621 (size=12154) 2024-11-20T13:26:19,291 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:19,292 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T13:26:19,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:19,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:19,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:19,292 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:19,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:19,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:19,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:19,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109239302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:19,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:19,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109239405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:19,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T13:26:19,444 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:19,445 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T13:26:19,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:19,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:19,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:19,445 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:19,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:19,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:19,597 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:19,598 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T13:26:19,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:19,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:19,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:19,598 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:19,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:19,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:19,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:19,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109239609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:19,654 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:19,657 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209395889b4fbb4abf87643fc12a628cce_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209395889b4fbb4abf87643fc12a628cce_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:19,658 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/76c508b03f5843d49b3e983f3513f8be, store: [table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:19,659 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/76c508b03f5843d49b3e983f3513f8be is 175, key is test_row_0/A:col10/1732109179212/Put/seqid=0 2024-11-20T13:26:19,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742446_1622 (size=30955) 2024-11-20T13:26:19,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T13:26:19,750 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:19,751 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T13:26:19,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:19,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:19,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:19,751 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:19,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:19,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:19,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:19,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32898 deadline: 1732109239887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:19,889 DEBUG [Thread-2630 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4126 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:26:19,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:19,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32872 deadline: 1732109239897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:19,899 DEBUG [Thread-2632 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4134 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:26:19,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:19,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732109239898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:19,901 DEBUG [Thread-2636 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4137 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:26:19,903 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:19,904 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T13:26:19,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:19,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:19,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:19,904 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:19,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:19,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:19,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:19,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32818 deadline: 1732109239903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:19,905 DEBUG [Thread-2628 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) 
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:26:19,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:19,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109239912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:20,022 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T13:26:20,056 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:20,059 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T13:26:20,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:20,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:20,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:20,060 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:20,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:20,063 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=104, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/76c508b03f5843d49b3e983f3513f8be 2024-11-20T13:26:20,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:20,088 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/58b2416300d743c0a76ebe8fb78fc2e6 is 50, key is test_row_0/B:col10/1732109179212/Put/seqid=0 2024-11-20T13:26:20,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742447_1623 (size=12001) 2024-11-20T13:26:20,118 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/58b2416300d743c0a76ebe8fb78fc2e6 2024-11-20T13:26:20,137 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/c05d3a581c044f00bde72e8ef50eca64 is 50, key is test_row_0/C:col10/1732109179212/Put/seqid=0 2024-11-20T13:26:20,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742448_1624 (size=12001) 2024-11-20T13:26:20,218 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:20,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T13:26:20,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:20,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:20,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:20,219 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:20,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:20,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:20,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T13:26:20,373 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:20,374 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T13:26:20,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:20,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:20,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:20,374 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:20,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:20,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:20,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:20,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109240415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:20,527 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:20,527 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T13:26:20,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:20,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:20,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:20,528 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:20,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:20,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:20,560 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/c05d3a581c044f00bde72e8ef50eca64 2024-11-20T13:26:20,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/76c508b03f5843d49b3e983f3513f8be as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/76c508b03f5843d49b3e983f3513f8be 2024-11-20T13:26:20,579 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/76c508b03f5843d49b3e983f3513f8be, entries=150, sequenceid=104, filesize=30.2 K 2024-11-20T13:26:20,580 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/58b2416300d743c0a76ebe8fb78fc2e6 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/58b2416300d743c0a76ebe8fb78fc2e6 2024-11-20T13:26:20,584 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/58b2416300d743c0a76ebe8fb78fc2e6, entries=150, sequenceid=104, filesize=11.7 K 2024-11-20T13:26:20,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/c05d3a581c044f00bde72e8ef50eca64 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/c05d3a581c044f00bde72e8ef50eca64 2024-11-20T13:26:20,590 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/c05d3a581c044f00bde72e8ef50eca64, entries=150, sequenceid=104, filesize=11.7 K 2024-11-20T13:26:20,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b7ff2ddfd4733260af7cbc9b7e7d2218 in 1363ms, sequenceid=104, compaction requested=true 2024-11-20T13:26:20,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:20,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
b7ff2ddfd4733260af7cbc9b7e7d2218:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:26:20,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:20,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7ff2ddfd4733260af7cbc9b7e7d2218:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:26:20,591 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:26:20,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:26:20,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7ff2ddfd4733260af7cbc9b7e7d2218:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:26:20,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T13:26:20,592 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:26:20,593 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 123923 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:26:20,593 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/A is initiating minor compaction (all files) 2024-11-20T13:26:20,593 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/A in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:20,593 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:26:20,593 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/42bc632d913543d487211e94e14c8dda, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/b0c3265b374a4d929c65a0dfe290166f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/1992484778384fd1bb6a2e46c0855600, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/76c508b03f5843d49b3e983f3513f8be] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=121.0 K 2024-11-20T13:26:20,593 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/B is initiating minor compaction (all files) 2024-11-20T13:26:20,593 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:20,593 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/42bc632d913543d487211e94e14c8dda, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/b0c3265b374a4d929c65a0dfe290166f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/1992484778384fd1bb6a2e46c0855600, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/76c508b03f5843d49b3e983f3513f8be] 2024-11-20T13:26:20,593 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/B in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:20,594 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/09ec13b851d648c3a95966f7d847f0f8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/c38c99d20df44843a7f7f0e2417679a8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/268b90b67dc149eb8d746ccad9beabcb, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/58b2416300d743c0a76ebe8fb78fc2e6] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=47.0 K 2024-11-20T13:26:20,594 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42bc632d913543d487211e94e14c8dda, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732109175761 2024-11-20T13:26:20,594 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 09ec13b851d648c3a95966f7d847f0f8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732109175761 2024-11-20T13:26:20,595 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0c3265b374a4d929c65a0dfe290166f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732109176863 2024-11-20T13:26:20,595 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting c38c99d20df44843a7f7f0e2417679a8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732109176863 2024-11-20T13:26:20,595 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1992484778384fd1bb6a2e46c0855600, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732109176981 2024-11-20T13:26:20,595 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 268b90b67dc149eb8d746ccad9beabcb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732109176981 2024-11-20T13:26:20,595 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 58b2416300d743c0a76ebe8fb78fc2e6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1732109179212 2024-11-20T13:26:20,595 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 76c508b03f5843d49b3e983f3513f8be, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1732109179212 2024-11-20T13:26:20,612 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:20,613 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): b7ff2ddfd4733260af7cbc9b7e7d2218#B#compaction#531 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:20,614 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/ae22cf0c2b524f3e891879b599dc85ff is 50, key is test_row_0/B:col10/1732109179212/Put/seqid=0 2024-11-20T13:26:20,631 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120fbd66b4b583246fb9d56168a428e3670_b7ff2ddfd4733260af7cbc9b7e7d2218 store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:20,633 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120fbd66b4b583246fb9d56168a428e3670_b7ff2ddfd4733260af7cbc9b7e7d2218, store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:20,633 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fbd66b4b583246fb9d56168a428e3670_b7ff2ddfd4733260af7cbc9b7e7d2218 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:20,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742449_1625 (size=12241) 2024-11-20T13:26:20,688 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:20,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T13:26:20,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:20,689 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing b7ff2ddfd4733260af7cbc9b7e7d2218 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T13:26:20,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=A 2024-11-20T13:26:20,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:20,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=B 2024-11-20T13:26:20,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:20,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=C 2024-11-20T13:26:20,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:20,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742450_1626 (size=4469) 2024-11-20T13:26:20,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d7033bc3486f4b4a8b7daad164887981_b7ff2ddfd4733260af7cbc9b7e7d2218 is 50, key is test_row_0/A:col10/1732109179291/Put/seqid=0 2024-11-20T13:26:20,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742451_1627 (size=12154) 2024-11-20T13:26:21,060 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/ae22cf0c2b524f3e891879b599dc85ff as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/ae22cf0c2b524f3e891879b599dc85ff 2024-11-20T13:26:21,065 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/B of b7ff2ddfd4733260af7cbc9b7e7d2218 into ae22cf0c2b524f3e891879b599dc85ff(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:26:21,065 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:21,065 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/B, priority=12, startTime=1732109180591; duration=0sec 2024-11-20T13:26:21,065 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:26:21,065 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:B 2024-11-20T13:26:21,065 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:26:21,077 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:26:21,077 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/C is initiating minor compaction (all files) 2024-11-20T13:26:21,077 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/C in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:21,078 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/b5607c7f31e042ad9a5f78e3742e4fc9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/494109b7d42f451abe2ae634bb21d9ea, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/a596fbb7e2654d9289065b947f21e43a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/c05d3a581c044f00bde72e8ef50eca64] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=47.0 K 2024-11-20T13:26:21,078 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting b5607c7f31e042ad9a5f78e3742e4fc9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732109175761 2024-11-20T13:26:21,078 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 494109b7d42f451abe2ae634bb21d9ea, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732109176863 2024-11-20T13:26:21,079 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting a596fbb7e2654d9289065b947f21e43a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=92, earliestPutTs=1732109176981 2024-11-20T13:26:21,079 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting c05d3a581c044f00bde72e8ef50eca64, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1732109179212 2024-11-20T13:26:21,109 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7ff2ddfd4733260af7cbc9b7e7d2218#A#compaction#532 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:21,109 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/80f61552d7e3414faa7eb21cade93e6b is 175, key is test_row_0/A:col10/1732109179212/Put/seqid=0 2024-11-20T13:26:21,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:21,120 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7ff2ddfd4733260af7cbc9b7e7d2218#C#compaction#534 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:21,121 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/cc6365c1185244b48ce06b637786a326 is 50, key is test_row_0/C:col10/1732109179212/Put/seqid=0 2024-11-20T13:26:21,124 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d7033bc3486f4b4a8b7daad164887981_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d7033bc3486f4b4a8b7daad164887981_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:21,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/2c69e04bc81c44448839f0f669e40c66, store: [table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:21,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/2c69e04bc81c44448839f0f669e40c66 is 175, key is test_row_0/A:col10/1732109179291/Put/seqid=0 2024-11-20T13:26:21,148 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742452_1628 (size=31195) 2024-11-20T13:26:21,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742453_1629 (size=12241) 2024-11-20T13:26:21,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742454_1630 (size=30955) 2024-11-20T13:26:21,181 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/cc6365c1185244b48ce06b637786a326 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/cc6365c1185244b48ce06b637786a326 2024-11-20T13:26:21,186 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/C of b7ff2ddfd4733260af7cbc9b7e7d2218 into cc6365c1185244b48ce06b637786a326(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:26:21,186 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:21,186 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/C, priority=12, startTime=1732109180591; duration=0sec 2024-11-20T13:26:21,186 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:21,186 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:C 2024-11-20T13:26:21,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T13:26:21,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:21,432 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:21,471 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:21,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109241469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:21,553 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/80f61552d7e3414faa7eb21cade93e6b as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/80f61552d7e3414faa7eb21cade93e6b 2024-11-20T13:26:21,557 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/A of b7ff2ddfd4733260af7cbc9b7e7d2218 into 80f61552d7e3414faa7eb21cade93e6b(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:26:21,557 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:21,557 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/A, priority=12, startTime=1732109180591; duration=0sec 2024-11-20T13:26:21,557 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:21,557 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:A 2024-11-20T13:26:21,571 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=129, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/2c69e04bc81c44448839f0f669e40c66 2024-11-20T13:26:21,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:21,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109241572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:21,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/8b4382c73a564866b60a72e95e3e318a is 50, key is test_row_0/B:col10/1732109179291/Put/seqid=0 2024-11-20T13:26:21,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742455_1631 (size=12001) 2024-11-20T13:26:21,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:21,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109241780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:22,016 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/8b4382c73a564866b60a72e95e3e318a 2024-11-20T13:26:22,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/a8cfb4a2eb804f87b40638ea1f2f74ee is 50, key is test_row_0/C:col10/1732109179291/Put/seqid=0 2024-11-20T13:26:22,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742456_1632 (size=12001) 2024-11-20T13:26:22,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:22,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109242089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:22,477 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/a8cfb4a2eb804f87b40638ea1f2f74ee 2024-11-20T13:26:22,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/2c69e04bc81c44448839f0f669e40c66 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/2c69e04bc81c44448839f0f669e40c66 2024-11-20T13:26:22,494 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/2c69e04bc81c44448839f0f669e40c66, entries=150, sequenceid=129, filesize=30.2 K 2024-11-20T13:26:22,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/8b4382c73a564866b60a72e95e3e318a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/8b4382c73a564866b60a72e95e3e318a 2024-11-20T13:26:22,499 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/8b4382c73a564866b60a72e95e3e318a, entries=150, sequenceid=129, filesize=11.7 K 2024-11-20T13:26:22,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/a8cfb4a2eb804f87b40638ea1f2f74ee as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/a8cfb4a2eb804f87b40638ea1f2f74ee 2024-11-20T13:26:22,517 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/a8cfb4a2eb804f87b40638ea1f2f74ee, entries=150, sequenceid=129, filesize=11.7 K 2024-11-20T13:26:22,524 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for b7ff2ddfd4733260af7cbc9b7e7d2218 in 1834ms, sequenceid=129, compaction requested=false 2024-11-20T13:26:22,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:22,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:22,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-11-20T13:26:22,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-11-20T13:26:22,526 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-11-20T13:26:22,527 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.3850 sec 2024-11-20T13:26:22,540 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 3.3950 sec 2024-11-20T13:26:22,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:22,600 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7ff2ddfd4733260af7cbc9b7e7d2218 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T13:26:22,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=A 2024-11-20T13:26:22,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:22,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=B 2024-11-20T13:26:22,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:22,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
b7ff2ddfd4733260af7cbc9b7e7d2218, store=C 2024-11-20T13:26:22,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:22,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112061fd6724dabe4935a27cea6b44358415_b7ff2ddfd4733260af7cbc9b7e7d2218 is 50, key is test_row_0/A:col10/1732109182599/Put/seqid=0 2024-11-20T13:26:22,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742457_1633 (size=12304) 2024-11-20T13:26:22,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:22,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109242682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:22,793 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:22,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109242792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:23,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:23,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109243004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:23,035 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:23,065 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112061fd6724dabe4935a27cea6b44358415_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112061fd6724dabe4935a27cea6b44358415_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:23,072 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/bbf9fd763f824ccd9d45112765229797, store: [table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:23,073 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/bbf9fd763f824ccd9d45112765229797 is 175, key is test_row_0/A:col10/1732109182599/Put/seqid=0 2024-11-20T13:26:23,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742458_1634 (size=31105) 2024-11-20T13:26:23,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T13:26:23,244 INFO [Thread-2638 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-11-20T13:26:23,246 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:26:23,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-11-20T13:26:23,247 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=175, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:26:23,248 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:26:23,248 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:26:23,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T13:26:23,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:23,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109243312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:23,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T13:26:23,408 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:23,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-20T13:26:23,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:23,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:23,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:23,412 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:23,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:23,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:23,532 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=144, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/bbf9fd763f824ccd9d45112765229797 2024-11-20T13:26:23,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T13:26:23,559 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/5462bffc26564c56a86e8ac4c0e89ffe is 50, key is test_row_0/B:col10/1732109182599/Put/seqid=0 2024-11-20T13:26:23,568 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:23,568 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-20T13:26:23,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:23,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:23,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:23,572 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:23,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:23,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:23,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742459_1635 (size=12151) 2024-11-20T13:26:23,607 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=144 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/5462bffc26564c56a86e8ac4c0e89ffe 2024-11-20T13:26:23,624 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/8168b7bdb550462bb0a31c048253a1ef is 50, key is test_row_0/C:col10/1732109182599/Put/seqid=0 2024-11-20T13:26:23,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742460_1636 (size=12151) 2024-11-20T13:26:23,652 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=144 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/8168b7bdb550462bb0a31c048253a1ef 2024-11-20T13:26:23,659 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/bbf9fd763f824ccd9d45112765229797 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/bbf9fd763f824ccd9d45112765229797 2024-11-20T13:26:23,664 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/bbf9fd763f824ccd9d45112765229797, entries=150, sequenceid=144, filesize=30.4 K 2024-11-20T13:26:23,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/5462bffc26564c56a86e8ac4c0e89ffe as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/5462bffc26564c56a86e8ac4c0e89ffe 2024-11-20T13:26:23,671 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/5462bffc26564c56a86e8ac4c0e89ffe, entries=150, sequenceid=144, filesize=11.9 K 2024-11-20T13:26:23,672 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-20T13:26:23,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/8168b7bdb550462bb0a31c048253a1ef as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/8168b7bdb550462bb0a31c048253a1ef 2024-11-20T13:26:23,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/8168b7bdb550462bb0a31c048253a1ef, entries=150, sequenceid=144, filesize=11.9 K 2024-11-20T13:26:23,678 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-20T13:26:23,679 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for b7ff2ddfd4733260af7cbc9b7e7d2218 in 1079ms, sequenceid=144, compaction requested=true 2024-11-20T13:26:23,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:23,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7ff2ddfd4733260af7cbc9b7e7d2218:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:26:23,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:23,679 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:23,679 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:23,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7ff2ddfd4733260af7cbc9b7e7d2218:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:26:23,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:23,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7ff2ddfd4733260af7cbc9b7e7d2218:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:26:23,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:26:23,681 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:23,681 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93255 starting at candidate #0 after considering 1 permutations 
with 1 in ratio 2024-11-20T13:26:23,681 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/A is initiating minor compaction (all files) 2024-11-20T13:26:23,681 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/B is initiating minor compaction (all files) 2024-11-20T13:26:23,681 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/A in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:23,682 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/80f61552d7e3414faa7eb21cade93e6b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/2c69e04bc81c44448839f0f669e40c66, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/bbf9fd763f824ccd9d45112765229797] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=91.1 K 2024-11-20T13:26:23,682 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:23,682 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/80f61552d7e3414faa7eb21cade93e6b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/2c69e04bc81c44448839f0f669e40c66, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/bbf9fd763f824ccd9d45112765229797] 2024-11-20T13:26:23,682 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/B in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:23,682 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/ae22cf0c2b524f3e891879b599dc85ff, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/8b4382c73a564866b60a72e95e3e318a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/5462bffc26564c56a86e8ac4c0e89ffe] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=35.5 K 2024-11-20T13:26:23,682 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80f61552d7e3414faa7eb21cade93e6b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1732109179212 2024-11-20T13:26:23,682 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting ae22cf0c2b524f3e891879b599dc85ff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1732109179212 2024-11-20T13:26:23,682 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c69e04bc81c44448839f0f669e40c66, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732109179287 2024-11-20T13:26:23,682 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b4382c73a564866b60a72e95e3e318a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732109179287 2024-11-20T13:26:23,683 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 5462bffc26564c56a86e8ac4c0e89ffe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1732109181460 2024-11-20T13:26:23,683 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting bbf9fd763f824ccd9d45112765229797, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1732109181460 2024-11-20T13:26:23,690 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7ff2ddfd4733260af7cbc9b7e7d2218#B#compaction#540 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:23,690 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/7acac3bf3eac4c5bb1a067f4f6a4f694 is 50, key is test_row_0/B:col10/1732109182599/Put/seqid=0 2024-11-20T13:26:23,706 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:23,710 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411200a4452d97b3a4249a1d0ec78a6e43bc2_b7ff2ddfd4733260af7cbc9b7e7d2218 store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:23,715 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411200a4452d97b3a4249a1d0ec78a6e43bc2_b7ff2ddfd4733260af7cbc9b7e7d2218, store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:23,715 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200a4452d97b3a4249a1d0ec78a6e43bc2_b7ff2ddfd4733260af7cbc9b7e7d2218 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:23,727 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:23,730 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-20T13:26:23,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:23,730 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing b7ff2ddfd4733260af7cbc9b7e7d2218 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T13:26:23,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=A 2024-11-20T13:26:23,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:23,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=B 2024-11-20T13:26:23,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:23,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=C 2024-11-20T13:26:23,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:23,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742461_1637 (size=12493) 2024-11-20T13:26:23,745 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/7acac3bf3eac4c5bb1a067f4f6a4f694 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/7acac3bf3eac4c5bb1a067f4f6a4f694 2024-11-20T13:26:23,751 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/B of b7ff2ddfd4733260af7cbc9b7e7d2218 into 7acac3bf3eac4c5bb1a067f4f6a4f694(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:26:23,751 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:23,751 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/B, priority=13, startTime=1732109183679; duration=0sec 2024-11-20T13:26:23,751 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:26:23,751 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:B 2024-11-20T13:26:23,751 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:23,754 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:23,754 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/C is initiating minor compaction (all files) 2024-11-20T13:26:23,754 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/C in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:23,755 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/cc6365c1185244b48ce06b637786a326, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/a8cfb4a2eb804f87b40638ea1f2f74ee, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/8168b7bdb550462bb0a31c048253a1ef] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=35.5 K 2024-11-20T13:26:23,755 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting cc6365c1185244b48ce06b637786a326, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1732109179212 2024-11-20T13:26:23,755 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting a8cfb4a2eb804f87b40638ea1f2f74ee, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732109179287 2024-11-20T13:26:23,757 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 8168b7bdb550462bb0a31c048253a1ef, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1732109181460 2024-11-20T13:26:23,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 
is added to blk_1073742462_1638 (size=4469) 2024-11-20T13:26:23,762 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7ff2ddfd4733260af7cbc9b7e7d2218#A#compaction#541 average throughput is 0.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:23,763 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/cb650402e44c4dc08033292bdbc713fa is 175, key is test_row_0/A:col10/1732109182599/Put/seqid=0 2024-11-20T13:26:23,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e26c1589f80042a791cf8f300f02221f_b7ff2ddfd4733260af7cbc9b7e7d2218 is 50, key is test_row_0/A:col10/1732109182676/Put/seqid=0 2024-11-20T13:26:23,786 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7ff2ddfd4733260af7cbc9b7e7d2218#C#compaction#543 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:23,787 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/58f12757724b4740b90bbe5ece04f77c is 50, key is test_row_0/C:col10/1732109182599/Put/seqid=0 2024-11-20T13:26:23,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
as already flushing 2024-11-20T13:26:23,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:23,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742463_1639 (size=31447) 2024-11-20T13:26:23,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742464_1640 (size=12304) 2024-11-20T13:26:23,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:23,835 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/cb650402e44c4dc08033292bdbc713fa as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/cb650402e44c4dc08033292bdbc713fa 2024-11-20T13:26:23,843 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e26c1589f80042a791cf8f300f02221f_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e26c1589f80042a791cf8f300f02221f_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:23,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/dca3313e6a854d228ba5210c1bf6babd, store: [table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:23,847 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/A of b7ff2ddfd4733260af7cbc9b7e7d2218 into cb650402e44c4dc08033292bdbc713fa(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:26:23,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/dca3313e6a854d228ba5210c1bf6babd is 175, key is test_row_0/A:col10/1732109182676/Put/seqid=0 2024-11-20T13:26:23,847 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:23,847 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/A, priority=13, startTime=1732109183679; duration=0sec 2024-11-20T13:26:23,847 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:23,847 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:A 2024-11-20T13:26:23,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742465_1641 (size=12493) 2024-11-20T13:26:23,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T13:26:23,858 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/58f12757724b4740b90bbe5ece04f77c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/58f12757724b4740b90bbe5ece04f77c 2024-11-20T13:26:23,864 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/C of b7ff2ddfd4733260af7cbc9b7e7d2218 into 58f12757724b4740b90bbe5ece04f77c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:26:23,864 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:23,864 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/C, priority=13, startTime=1732109183679; duration=0sec 2024-11-20T13:26:23,864 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:23,864 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:C 2024-11-20T13:26:23,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742466_1642 (size=31105) 2024-11-20T13:26:23,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:23,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109243875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:23,881 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=168, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/dca3313e6a854d228ba5210c1bf6babd 2024-11-20T13:26:23,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:23,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32872 deadline: 1732109243912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:23,916 DEBUG [Thread-2632 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:26:23,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/cd47528d948344f28d3cd703fa7a5772 is 50, key is test_row_0/B:col10/1732109182676/Put/seqid=0 2024-11-20T13:26:23,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:23,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32898 deadline: 1732109243927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:23,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:23,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32818 deadline: 1732109243928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:23,931 DEBUG [Thread-2630 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8168 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:26:23,932 DEBUG [Thread-2628 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8169 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:26:23,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:23,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732109243932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:23,936 DEBUG [Thread-2636 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8172 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:26:23,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742467_1643 (size=12151) 2024-11-20T13:26:23,965 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/cd47528d948344f28d3cd703fa7a5772 2024-11-20T13:26:23,980 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/f155b9c1ec7d4559af60ae3f07fff30f is 50, key is test_row_0/C:col10/1732109182676/Put/seqid=0 2024-11-20T13:26:23,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:23,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109243984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:23,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742468_1644 (size=12151) 2024-11-20T13:26:23,992 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/f155b9c1ec7d4559af60ae3f07fff30f 2024-11-20T13:26:23,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/dca3313e6a854d228ba5210c1bf6babd as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/dca3313e6a854d228ba5210c1bf6babd 2024-11-20T13:26:24,003 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/dca3313e6a854d228ba5210c1bf6babd, entries=150, sequenceid=168, filesize=30.4 K 2024-11-20T13:26:24,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/cd47528d948344f28d3cd703fa7a5772 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/cd47528d948344f28d3cd703fa7a5772 2024-11-20T13:26:24,009 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/cd47528d948344f28d3cd703fa7a5772, entries=150, sequenceid=168, filesize=11.9 K 2024-11-20T13:26:24,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/f155b9c1ec7d4559af60ae3f07fff30f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/f155b9c1ec7d4559af60ae3f07fff30f 2024-11-20T13:26:24,014 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/f155b9c1ec7d4559af60ae3f07fff30f, entries=150, sequenceid=168, filesize=11.9 K 2024-11-20T13:26:24,016 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for b7ff2ddfd4733260af7cbc9b7e7d2218 in 286ms, sequenceid=168, compaction requested=false 2024-11-20T13:26:24,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:24,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:24,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-11-20T13:26:24,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-11-20T13:26:24,019 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-11-20T13:26:24,019 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 770 msec 2024-11-20T13:26:24,021 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 774 msec 2024-11-20T13:26:24,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:24,192 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7ff2ddfd4733260af7cbc9b7e7d2218 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T13:26:24,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=A 2024-11-20T13:26:24,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:24,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=B 2024-11-20T13:26:24,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:24,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=C 2024-11-20T13:26:24,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:24,202 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a261c2623f984176aae81840e0816327_b7ff2ddfd4733260af7cbc9b7e7d2218 is 50, key is test_row_0/A:col10/1732109183870/Put/seqid=0 2024-11-20T13:26:24,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742469_1645 (size=12304) 2024-11-20T13:26:24,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:24,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109244296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:24,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T13:26:24,357 INFO [Thread-2638 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-11-20T13:26:24,365 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:26:24,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees 2024-11-20T13:26:24,367 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:26:24,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T13:26:24,367 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:26:24,368 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:26:24,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:24,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109244400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:24,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T13:26:24,520 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:24,522 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T13:26:24,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:24,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:24,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:24,528 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:24,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:24,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:24,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109244616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:24,622 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:24,646 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a261c2623f984176aae81840e0816327_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a261c2623f984176aae81840e0816327_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:24,647 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/eb4d3d102b8143499fc1084eea2cc481, store: [table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:24,648 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/eb4d3d102b8143499fc1084eea2cc481 is 175, key is test_row_0/A:col10/1732109183870/Put/seqid=0 2024-11-20T13:26:24,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T13:26:24,680 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:24,681 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T13:26:24,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:24,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:24,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:24,681 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:24,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:24,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:24,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742470_1646 (size=31105) 2024-11-20T13:26:24,834 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:24,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T13:26:24,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:24,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:24,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:24,840 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:24,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:24,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:24,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:24,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109244921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:24,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T13:26:24,995 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:24,996 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T13:26:25,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:25,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:25,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:25,000 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:25,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:25,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:25,086 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=184, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/eb4d3d102b8143499fc1084eea2cc481 2024-11-20T13:26:25,109 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/8dec30daf9414ce0899b4dcadc125361 is 50, key is test_row_0/B:col10/1732109183870/Put/seqid=0 2024-11-20T13:26:25,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742471_1647 (size=12151) 2024-11-20T13:26:25,158 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:25,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T13:26:25,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:25,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
as already flushing 2024-11-20T13:26:25,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:25,165 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:25,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:25,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:25,321 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:25,322 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T13:26:25,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:25,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:25,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:25,322 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:25,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:25,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:25,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:25,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109245424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:25,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T13:26:25,474 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:25,476 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T13:26:25,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:25,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:25,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:25,477 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:25,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:25,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:25,534 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/8dec30daf9414ce0899b4dcadc125361 2024-11-20T13:26:25,572 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/6141f85e7dff414498fcc8003a28a9df is 50, key is test_row_0/C:col10/1732109183870/Put/seqid=0 2024-11-20T13:26:25,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742472_1648 (size=12151) 2024-11-20T13:26:25,633 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:25,633 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T13:26:25,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:25,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:25,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:25,634 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:25,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:25,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:25,787 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:25,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T13:26:25,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:25,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:25,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:25,788 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:25,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:25,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:25,945 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:25,945 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T13:26:25,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:25,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:25,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:25,946 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:25,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:25,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:26,032 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/6141f85e7dff414498fcc8003a28a9df 2024-11-20T13:26:26,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/eb4d3d102b8143499fc1084eea2cc481 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/eb4d3d102b8143499fc1084eea2cc481 2024-11-20T13:26:26,084 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/eb4d3d102b8143499fc1084eea2cc481, entries=150, sequenceid=184, filesize=30.4 K 2024-11-20T13:26:26,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/8dec30daf9414ce0899b4dcadc125361 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/8dec30daf9414ce0899b4dcadc125361 2024-11-20T13:26:26,089 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/8dec30daf9414ce0899b4dcadc125361, entries=150, sequenceid=184, filesize=11.9 K 2024-11-20T13:26:26,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/6141f85e7dff414498fcc8003a28a9df as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/6141f85e7dff414498fcc8003a28a9df 2024-11-20T13:26:26,095 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/6141f85e7dff414498fcc8003a28a9df, entries=150, sequenceid=184, filesize=11.9 K 2024-11-20T13:26:26,097 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:26,097 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for b7ff2ddfd4733260af7cbc9b7e7d2218 in 1905ms, sequenceid=184, compaction requested=true 2024-11-20T13:26:26,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:26,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7ff2ddfd4733260af7cbc9b7e7d2218:A, priority=-2147483648, current under compaction store size is 1 
2024-11-20T13:26:26,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:26,098 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:26,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7ff2ddfd4733260af7cbc9b7e7d2218:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:26:26,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:26,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7ff2ddfd4733260af7cbc9b7e7d2218:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:26:26,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:26:26,098 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:26,098 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T13:26:26,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:26,099 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing b7ff2ddfd4733260af7cbc9b7e7d2218 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T13:26:26,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=A 2024-11-20T13:26:26,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:26,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=B 2024-11-20T13:26:26,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:26,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=C 2024-11-20T13:26:26,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:26,100 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93657 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:26,100 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/A is initiating minor compaction (all files) 2024-11-20T13:26:26,100 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/A in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:26,100 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/cb650402e44c4dc08033292bdbc713fa, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/dca3313e6a854d228ba5210c1bf6babd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/eb4d3d102b8143499fc1084eea2cc481] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=91.5 K 2024-11-20T13:26:26,100 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:26,100 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/cb650402e44c4dc08033292bdbc713fa, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/dca3313e6a854d228ba5210c1bf6babd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/eb4d3d102b8143499fc1084eea2cc481] 2024-11-20T13:26:26,100 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:26,100 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/B is initiating minor compaction (all files) 2024-11-20T13:26:26,100 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb650402e44c4dc08033292bdbc713fa, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1732109181460 2024-11-20T13:26:26,100 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/B in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:26,100 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/7acac3bf3eac4c5bb1a067f4f6a4f694, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/cd47528d948344f28d3cd703fa7a5772, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/8dec30daf9414ce0899b4dcadc125361] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=35.9 K 2024-11-20T13:26:26,101 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 7acac3bf3eac4c5bb1a067f4f6a4f694, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1732109181460 2024-11-20T13:26:26,101 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting dca3313e6a854d228ba5210c1bf6babd, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732109182666 2024-11-20T13:26:26,101 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting cd47528d948344f28d3cd703fa7a5772, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732109182666 2024-11-20T13:26:26,101 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb4d3d102b8143499fc1084eea2cc481, 
keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=184, earliestPutTs=1732109183852 2024-11-20T13:26:26,102 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 8dec30daf9414ce0899b4dcadc125361, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=184, earliestPutTs=1732109183852 2024-11-20T13:26:26,118 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7ff2ddfd4733260af7cbc9b7e7d2218#B#compaction#549 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:26,119 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/691287860517458288b94a06a7a32bb8 is 50, key is test_row_0/B:col10/1732109183870/Put/seqid=0 2024-11-20T13:26:26,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a5514cac18df4b4181958694010a07bc_b7ff2ddfd4733260af7cbc9b7e7d2218 is 50, key is test_row_0/A:col10/1732109184283/Put/seqid=0 2024-11-20T13:26:26,142 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:26,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742473_1649 (size=12304) 2024-11-20T13:26:26,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:26,183 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a5514cac18df4b4181958694010a07bc_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a5514cac18df4b4181958694010a07bc_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:26,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/ce3edbf269b54b2db5bcf7d05b907c16, store: [table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:26,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/ce3edbf269b54b2db5bcf7d05b907c16 is 175, key is test_row_0/A:col10/1732109184283/Put/seqid=0 2024-11-20T13:26:26,186 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411203e0eb23f3fa247e28a084fe6512aea4c_b7ff2ddfd4733260af7cbc9b7e7d2218 store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:26,188 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411203e0eb23f3fa247e28a084fe6512aea4c_b7ff2ddfd4733260af7cbc9b7e7d2218, store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:26,188 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203e0eb23f3fa247e28a084fe6512aea4c_b7ff2ddfd4733260af7cbc9b7e7d2218 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:26,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742474_1650 (size=12595) 2024-11-20T13:26:26,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742476_1652 (size=4469) 2024-11-20T13:26:26,236 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7ff2ddfd4733260af7cbc9b7e7d2218#A#compaction#551 average throughput is 0.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:26,237 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/6d6c3c76874f4581a329e6dbb3e739a6 is 175, key is test_row_0/A:col10/1732109183870/Put/seqid=0 2024-11-20T13:26:26,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742475_1651 (size=31105) 2024-11-20T13:26:26,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742477_1653 (size=31549) 2024-11-20T13:26:26,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:26,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:26,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T13:26:26,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:26,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109246477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:26,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:26,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109246582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:26,600 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/691287860517458288b94a06a7a32bb8 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/691287860517458288b94a06a7a32bb8 2024-11-20T13:26:26,604 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/B of b7ff2ddfd4733260af7cbc9b7e7d2218 into 691287860517458288b94a06a7a32bb8(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:26:26,604 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:26,604 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/B, priority=13, startTime=1732109186098; duration=0sec 2024-11-20T13:26:26,604 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:26:26,604 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:B 2024-11-20T13:26:26,605 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:26,605 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:26,606 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/C is initiating minor compaction (all files) 2024-11-20T13:26:26,606 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/C in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:26,606 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/58f12757724b4740b90bbe5ece04f77c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/f155b9c1ec7d4559af60ae3f07fff30f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/6141f85e7dff414498fcc8003a28a9df] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=35.9 K 2024-11-20T13:26:26,607 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 58f12757724b4740b90bbe5ece04f77c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1732109181460 2024-11-20T13:26:26,608 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting f155b9c1ec7d4559af60ae3f07fff30f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732109182666 2024-11-20T13:26:26,608 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 6141f85e7dff414498fcc8003a28a9df, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=184, earliestPutTs=1732109183852 2024-11-20T13:26:26,623 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
b7ff2ddfd4733260af7cbc9b7e7d2218#C#compaction#552 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:26,624 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/8fc2d8a222b1415d93cd5a1affa8250b is 50, key is test_row_0/C:col10/1732109183870/Put/seqid=0 2024-11-20T13:26:26,641 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=207, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/ce3edbf269b54b2db5bcf7d05b907c16 2024-11-20T13:26:26,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/2d46091538a84d349ab8a07024450a3a is 50, key is test_row_0/B:col10/1732109184283/Put/seqid=0 2024-11-20T13:26:26,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742478_1654 (size=12595) 2024-11-20T13:26:26,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742479_1655 (size=12151) 2024-11-20T13:26:26,689 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/6d6c3c76874f4581a329e6dbb3e739a6 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/6d6c3c76874f4581a329e6dbb3e739a6 2024-11-20T13:26:26,693 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/A of b7ff2ddfd4733260af7cbc9b7e7d2218 into 6d6c3c76874f4581a329e6dbb3e739a6(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:26:26,693 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:26,693 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/A, priority=13, startTime=1732109186098; duration=0sec 2024-11-20T13:26:26,694 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:26,694 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:A 2024-11-20T13:26:26,796 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:26,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109246792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:27,066 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/8fc2d8a222b1415d93cd5a1affa8250b as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/8fc2d8a222b1415d93cd5a1affa8250b 2024-11-20T13:26:27,078 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/2d46091538a84d349ab8a07024450a3a 2024-11-20T13:26:27,090 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/C of b7ff2ddfd4733260af7cbc9b7e7d2218 into 8fc2d8a222b1415d93cd5a1affa8250b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:26:27,090 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:27,090 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/C, priority=13, startTime=1732109186098; duration=0sec 2024-11-20T13:26:27,090 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:27,090 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:C 2024-11-20T13:26:27,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/7aaf53f4224b47f0ab5177b8ed4d1de8 is 50, key is test_row_0/C:col10/1732109184283/Put/seqid=0 2024-11-20T13:26:27,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742480_1656 (size=12151) 2024-11-20T13:26:27,099 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:27,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109247097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:27,500 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/7aaf53f4224b47f0ab5177b8ed4d1de8 2024-11-20T13:26:27,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/ce3edbf269b54b2db5bcf7d05b907c16 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/ce3edbf269b54b2db5bcf7d05b907c16 2024-11-20T13:26:27,507 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/ce3edbf269b54b2db5bcf7d05b907c16, entries=150, sequenceid=207, filesize=30.4 K 2024-11-20T13:26:27,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/2d46091538a84d349ab8a07024450a3a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/2d46091538a84d349ab8a07024450a3a 2024-11-20T13:26:27,511 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/2d46091538a84d349ab8a07024450a3a, entries=150, sequenceid=207, filesize=11.9 K 2024-11-20T13:26:27,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/7aaf53f4224b47f0ab5177b8ed4d1de8 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/7aaf53f4224b47f0ab5177b8ed4d1de8 2024-11-20T13:26:27,515 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/7aaf53f4224b47f0ab5177b8ed4d1de8, entries=150, sequenceid=207, filesize=11.9 K 2024-11-20T13:26:27,516 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for b7ff2ddfd4733260af7cbc9b7e7d2218 in 1417ms, sequenceid=207, compaction requested=false 2024-11-20T13:26:27,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:27,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:27,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-11-20T13:26:27,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=178 2024-11-20T13:26:27,522 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-11-20T13:26:27,522 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1490 sec 2024-11-20T13:26:27,523 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 3.1570 sec 2024-11-20T13:26:27,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:27,604 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7ff2ddfd4733260af7cbc9b7e7d2218 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T13:26:27,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=A 2024-11-20T13:26:27,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:27,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=B 2024-11-20T13:26:27,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:27,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
b7ff2ddfd4733260af7cbc9b7e7d2218, store=C 2024-11-20T13:26:27,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:27,610 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208b345c44b6d942f795de512936090928_b7ff2ddfd4733260af7cbc9b7e7d2218 is 50, key is test_row_0/A:col10/1732109186475/Put/seqid=0 2024-11-20T13:26:27,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742481_1657 (size=14794) 2024-11-20T13:26:27,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:27,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109247679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:27,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:27,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109247782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:27,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:27,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109247984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:28,015 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:28,025 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208b345c44b6d942f795de512936090928_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208b345c44b6d942f795de512936090928_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:28,028 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/a6f2fe1ce2844c33a5228fb00ae04e05, store: [table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:28,029 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/a6f2fe1ce2844c33a5228fb00ae04e05 is 175, key is test_row_0/A:col10/1732109186475/Put/seqid=0 2024-11-20T13:26:28,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742482_1658 (size=39749) 2024-11-20T13:26:28,066 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=224, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/a6f2fe1ce2844c33a5228fb00ae04e05 2024-11-20T13:26:28,093 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/41f89a947e2b4fc69646772c3d828117 is 50, key is test_row_0/B:col10/1732109186475/Put/seqid=0 2024-11-20T13:26:28,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742483_1659 
(size=12151) 2024-11-20T13:26:28,140 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=224 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/41f89a947e2b4fc69646772c3d828117 2024-11-20T13:26:28,154 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/22ad8b2225db4ab99c58fe81e9a1dfba is 50, key is test_row_0/C:col10/1732109186475/Put/seqid=0 2024-11-20T13:26:28,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742484_1660 (size=12151) 2024-11-20T13:26:28,194 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=224 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/22ad8b2225db4ab99c58fe81e9a1dfba 2024-11-20T13:26:28,198 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/a6f2fe1ce2844c33a5228fb00ae04e05 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/a6f2fe1ce2844c33a5228fb00ae04e05 2024-11-20T13:26:28,202 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/a6f2fe1ce2844c33a5228fb00ae04e05, entries=200, sequenceid=224, filesize=38.8 K 2024-11-20T13:26:28,204 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/41f89a947e2b4fc69646772c3d828117 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/41f89a947e2b4fc69646772c3d828117 2024-11-20T13:26:28,207 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/41f89a947e2b4fc69646772c3d828117, entries=150, sequenceid=224, filesize=11.9 K 2024-11-20T13:26:28,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/22ad8b2225db4ab99c58fe81e9a1dfba as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/22ad8b2225db4ab99c58fe81e9a1dfba 2024-11-20T13:26:28,213 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/22ad8b2225db4ab99c58fe81e9a1dfba, entries=150, sequenceid=224, filesize=11.9 K 2024-11-20T13:26:28,214 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for b7ff2ddfd4733260af7cbc9b7e7d2218 in 610ms, sequenceid=224, compaction requested=true 2024-11-20T13:26:28,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:28,214 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:28,215 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102403 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:28,215 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/A is initiating minor compaction (all files) 2024-11-20T13:26:28,215 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/A in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:28,215 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/6d6c3c76874f4581a329e6dbb3e739a6, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/ce3edbf269b54b2db5bcf7d05b907c16, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/a6f2fe1ce2844c33a5228fb00ae04e05] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=100.0 K 2024-11-20T13:26:28,215 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:28,215 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/6d6c3c76874f4581a329e6dbb3e739a6, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/ce3edbf269b54b2db5bcf7d05b907c16, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/a6f2fe1ce2844c33a5228fb00ae04e05] 2024-11-20T13:26:28,216 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d6c3c76874f4581a329e6dbb3e739a6, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=184, earliestPutTs=1732109183852 2024-11-20T13:26:28,216 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce3edbf269b54b2db5bcf7d05b907c16, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732109184283 2024-11-20T13:26:28,216 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6f2fe1ce2844c33a5228fb00ae04e05, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1732109186445 2024-11-20T13:26:28,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7ff2ddfd4733260af7cbc9b7e7d2218:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:26:28,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:28,222 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:28,223 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:28,223 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/B is initiating minor compaction (all files) 2024-11-20T13:26:28,223 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/B in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:28,223 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/691287860517458288b94a06a7a32bb8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/2d46091538a84d349ab8a07024450a3a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/41f89a947e2b4fc69646772c3d828117] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=36.0 K 2024-11-20T13:26:28,224 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 691287860517458288b94a06a7a32bb8, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=184, earliestPutTs=1732109183852 2024-11-20T13:26:28,224 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d46091538a84d349ab8a07024450a3a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732109184283 2024-11-20T13:26:28,224 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 41f89a947e2b4fc69646772c3d828117, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1732109186445 2024-11-20T13:26:28,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7ff2ddfd4733260af7cbc9b7e7d2218:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:26:28,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:28,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7ff2ddfd4733260af7cbc9b7e7d2218:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:26:28,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:26:28,241 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:28,247 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7ff2ddfd4733260af7cbc9b7e7d2218#B#compaction#559 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:28,247 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/00330457c4454efbb1f9a04b8253f9b6 is 50, key is test_row_0/B:col10/1732109186475/Put/seqid=0 2024-11-20T13:26:28,257 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120bab969bef55b4a67860b597320d69da9_b7ff2ddfd4733260af7cbc9b7e7d2218 store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:28,262 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120bab969bef55b4a67860b597320d69da9_b7ff2ddfd4733260af7cbc9b7e7d2218, store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:28,262 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bab969bef55b4a67860b597320d69da9_b7ff2ddfd4733260af7cbc9b7e7d2218 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:28,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:28,290 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7ff2ddfd4733260af7cbc9b7e7d2218 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T13:26:28,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=A 2024-11-20T13:26:28,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:28,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=B 2024-11-20T13:26:28,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:28,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=C 2024-11-20T13:26:28,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:28,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742485_1661 (size=12697) 2024-11-20T13:26:28,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742486_1662 (size=4469) 2024-11-20T13:26:28,323 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e7e92bc1277e4ec3871ed0e343b08c04_b7ff2ddfd4733260af7cbc9b7e7d2218 is 50, key is test_row_0/A:col10/1732109187650/Put/seqid=0 2024-11-20T13:26:28,331 INFO 
[RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7ff2ddfd4733260af7cbc9b7e7d2218#A#compaction#558 average throughput is 0.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:28,331 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/b402942377514915b78f98318f5e0671 is 175, key is test_row_0/A:col10/1732109186475/Put/seqid=0 2024-11-20T13:26:28,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742487_1663 (size=12304) 2024-11-20T13:26:28,349 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:28,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:28,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109248349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:28,370 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e7e92bc1277e4ec3871ed0e343b08c04_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e7e92bc1277e4ec3871ed0e343b08c04_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:28,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742488_1664 (size=31651) 2024-11-20T13:26:28,384 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/b6e4da51b5b04e39896067574da7475b, store: [table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:28,385 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/b6e4da51b5b04e39896067574da7475b is 175, key is test_row_0/A:col10/1732109187650/Put/seqid=0 2024-11-20T13:26:28,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742489_1665 (size=31105) 2024-11-20T13:26:28,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:28,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109248458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:28,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T13:26:28,476 INFO [Thread-2638 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-11-20T13:26:28,485 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:26:28,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees 2024-11-20T13:26:28,487 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:26:28,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T13:26:28,487 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:26:28,488 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:26:28,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T13:26:28,639 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:28,640 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-20T13:26:28,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:28,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:28,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:28,640 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:28,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:28,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:28,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:28,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109248662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:28,724 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/00330457c4454efbb1f9a04b8253f9b6 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/00330457c4454efbb1f9a04b8253f9b6 2024-11-20T13:26:28,734 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/B of b7ff2ddfd4733260af7cbc9b7e7d2218 into 00330457c4454efbb1f9a04b8253f9b6(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:26:28,734 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:28,734 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/B, priority=13, startTime=1732109188222; duration=0sec 2024-11-20T13:26:28,734 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:26:28,734 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:B 2024-11-20T13:26:28,734 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:28,736 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:28,736 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/C is initiating minor compaction (all files) 2024-11-20T13:26:28,736 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/C in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:28,736 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/8fc2d8a222b1415d93cd5a1affa8250b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/7aaf53f4224b47f0ab5177b8ed4d1de8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/22ad8b2225db4ab99c58fe81e9a1dfba] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=36.0 K 2024-11-20T13:26:28,737 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 8fc2d8a222b1415d93cd5a1affa8250b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=184, earliestPutTs=1732109183852 2024-11-20T13:26:28,737 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 7aaf53f4224b47f0ab5177b8ed4d1de8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732109184283 2024-11-20T13:26:28,737 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 22ad8b2225db4ab99c58fe81e9a1dfba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1732109186445 2024-11-20T13:26:28,749 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
b7ff2ddfd4733260af7cbc9b7e7d2218#C#compaction#561 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:28,749 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/6e357dce76c346f4beddacf984aff043 is 50, key is test_row_0/C:col10/1732109186475/Put/seqid=0 2024-11-20T13:26:28,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742490_1666 (size=12697) 2024-11-20T13:26:28,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T13:26:28,797 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:28,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-20T13:26:28,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:28,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:28,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:28,798 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:28,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:28,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:28,800 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/b402942377514915b78f98318f5e0671 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/b402942377514915b78f98318f5e0671 2024-11-20T13:26:28,813 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/A of b7ff2ddfd4733260af7cbc9b7e7d2218 into b402942377514915b78f98318f5e0671(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:26:28,813 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:28,813 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/A, priority=13, startTime=1732109188214; duration=0sec 2024-11-20T13:26:28,814 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:28,814 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:A 2024-11-20T13:26:28,848 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=247, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/b6e4da51b5b04e39896067574da7475b 2024-11-20T13:26:28,874 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/84c1b04fc64d4380b2028190caafdaac is 50, key is test_row_0/B:col10/1732109187650/Put/seqid=0 2024-11-20T13:26:28,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742491_1667 (size=12151) 2024-11-20T13:26:28,951 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:28,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-20T13:26:28,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:28,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:28,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:28,952 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:28,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:28,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:28,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:28,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109248968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:29,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T13:26:29,118 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:29,119 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-20T13:26:29,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:29,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:29,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:29,119 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:29,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:29,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:29,192 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/6e357dce76c346f4beddacf984aff043 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/6e357dce76c346f4beddacf984aff043 2024-11-20T13:26:29,198 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/C of b7ff2ddfd4733260af7cbc9b7e7d2218 into 6e357dce76c346f4beddacf984aff043(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:26:29,198 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:29,198 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/C, priority=13, startTime=1732109188230; duration=0sec 2024-11-20T13:26:29,198 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:29,198 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:C 2024-11-20T13:26:29,276 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:29,280 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-20T13:26:29,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:29,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:29,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:29,280 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:29,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:29,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:29,340 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/84c1b04fc64d4380b2028190caafdaac 2024-11-20T13:26:29,369 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/6e663b0c8ff247ef8284c7016a32a5be is 50, key is test_row_0/C:col10/1732109187650/Put/seqid=0 2024-11-20T13:26:29,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742492_1668 (size=12151) 2024-11-20T13:26:29,397 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/6e663b0c8ff247ef8284c7016a32a5be 2024-11-20T13:26:29,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/b6e4da51b5b04e39896067574da7475b as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/b6e4da51b5b04e39896067574da7475b 2024-11-20T13:26:29,421 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/b6e4da51b5b04e39896067574da7475b, entries=150, sequenceid=247, filesize=30.4 K 2024-11-20T13:26:29,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/84c1b04fc64d4380b2028190caafdaac as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/84c1b04fc64d4380b2028190caafdaac 2024-11-20T13:26:29,430 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/84c1b04fc64d4380b2028190caafdaac, entries=150, sequenceid=247, filesize=11.9 K 2024-11-20T13:26:29,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/6e663b0c8ff247ef8284c7016a32a5be as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/6e663b0c8ff247ef8284c7016a32a5be 2024-11-20T13:26:29,436 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/6e663b0c8ff247ef8284c7016a32a5be, entries=150, sequenceid=247, filesize=11.9 K 2024-11-20T13:26:29,450 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for b7ff2ddfd4733260af7cbc9b7e7d2218 in 1160ms, sequenceid=247, compaction requested=false 2024-11-20T13:26:29,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:29,463 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:29,463 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-20T13:26:29,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:29,464 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2837): Flushing b7ff2ddfd4733260af7cbc9b7e7d2218 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T13:26:29,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=A 2024-11-20T13:26:29,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:29,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=B 2024-11-20T13:26:29,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:29,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=C 2024-11-20T13:26:29,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:29,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:29,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:29,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112054eccaf076e142e0a98aa5207f026074_b7ff2ddfd4733260af7cbc9b7e7d2218 is 50, key is test_row_0/A:col10/1732109188338/Put/seqid=0 2024-11-20T13:26:29,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742493_1669 (size=12404) 2024-11-20T13:26:29,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:29,545 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112054eccaf076e142e0a98aa5207f026074_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112054eccaf076e142e0a98aa5207f026074_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:29,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=180}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/dc2359e222d843d394e949b17a84975b, store: [table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:29,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/dc2359e222d843d394e949b17a84975b is 175, key is test_row_0/A:col10/1732109188338/Put/seqid=0 2024-11-20T13:26:29,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742494_1670 (size=31205) 2024-11-20T13:26:29,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T13:26:29,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:29,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109249616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:29,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:29,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109249720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:29,925 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:29,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109249924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:29,987 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=263, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/dc2359e222d843d394e949b17a84975b 2024-11-20T13:26:29,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/524b4fd76fa84dad8410de620eabaaad is 50, key is test_row_0/B:col10/1732109188338/Put/seqid=0 2024-11-20T13:26:30,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742495_1671 (size=12251) 2024-11-20T13:26:30,041 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/524b4fd76fa84dad8410de620eabaaad 2024-11-20T13:26:30,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/1f7003d2260f4e3b8caba71bc55bd9eb is 50, key is test_row_0/C:col10/1732109188338/Put/seqid=0 2024-11-20T13:26:30,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742496_1672 (size=12251) 2024-11-20T13:26:30,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:30,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109250229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:30,478 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/1f7003d2260f4e3b8caba71bc55bd9eb 2024-11-20T13:26:30,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/dc2359e222d843d394e949b17a84975b as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/dc2359e222d843d394e949b17a84975b 2024-11-20T13:26:30,493 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/dc2359e222d843d394e949b17a84975b, entries=150, sequenceid=263, filesize=30.5 K 2024-11-20T13:26:30,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/524b4fd76fa84dad8410de620eabaaad as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/524b4fd76fa84dad8410de620eabaaad 2024-11-20T13:26:30,498 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/524b4fd76fa84dad8410de620eabaaad, entries=150, sequenceid=263, filesize=12.0 K 2024-11-20T13:26:30,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/1f7003d2260f4e3b8caba71bc55bd9eb as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/1f7003d2260f4e3b8caba71bc55bd9eb 2024-11-20T13:26:30,503 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/1f7003d2260f4e3b8caba71bc55bd9eb, entries=150, sequenceid=263, filesize=12.0 K 2024-11-20T13:26:30,508 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for b7ff2ddfd4733260af7cbc9b7e7d2218 in 1044ms, sequenceid=263, compaction requested=true 2024-11-20T13:26:30,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2538): Flush status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:30,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:30,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=180 2024-11-20T13:26:30,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=180 2024-11-20T13:26:30,517 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-11-20T13:26:30,517 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0280 sec 2024-11-20T13:26:30,521 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees in 2.0330 sec 2024-11-20T13:26:30,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T13:26:30,595 INFO [Thread-2638 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-11-20T13:26:30,596 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T13:26:30,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=181, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees 2024-11-20T13:26:30,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-20T13:26:30,599 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=181, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T13:26:30,600 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=181, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T13:26:30,600 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T13:26:30,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-20T13:26:30,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:30,740 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7ff2ddfd4733260af7cbc9b7e7d2218 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T13:26:30,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=A 2024-11-20T13:26:30,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:30,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
b7ff2ddfd4733260af7cbc9b7e7d2218, store=B 2024-11-20T13:26:30,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:30,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=C 2024-11-20T13:26:30,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:30,757 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:30,761 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-20T13:26:30,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:30,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:30,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:30,764 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:30,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:30,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:30,791 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201769fa3330fc4ffe9c39bc635b7ec461_b7ff2ddfd4733260af7cbc9b7e7d2218 is 50, key is test_row_0/A:col10/1732109190737/Put/seqid=0 2024-11-20T13:26:30,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:30,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 292 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109250803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:30,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742497_1673 (size=12454) 2024-11-20T13:26:30,851 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:30,876 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201769fa3330fc4ffe9c39bc635b7ec461_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201769fa3330fc4ffe9c39bc635b7ec461_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:30,888 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/cf859e4360f4419892f767a4acabc705, store: 
[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:30,889 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/cf859e4360f4419892f767a4acabc705 is 175, key is test_row_0/A:col10/1732109190737/Put/seqid=0 2024-11-20T13:26:30,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-20T13:26:30,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:30,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 294 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109250910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:30,920 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:30,921 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-20T13:26:30,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:30,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
as already flushing 2024-11-20T13:26:30,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:30,921 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:30,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:30,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:30,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742498_1674 (size=31255) 2024-11-20T13:26:30,928 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=287, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/cf859e4360f4419892f767a4acabc705 2024-11-20T13:26:30,953 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/bde3aa2a850c4a7ab7654f9c43ba6f9c is 50, key is test_row_0/B:col10/1732109190737/Put/seqid=0 2024-11-20T13:26:31,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742499_1675 (size=12301) 2024-11-20T13:26:31,074 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:31,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-20T13:26:31,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:31,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:31,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:31,075 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:31,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:31,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:31,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:31,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 296 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109251115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:31,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-20T13:26:31,230 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:31,230 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-20T13:26:31,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:31,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:31,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:31,232 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T13:26:31,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:31,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:31,390 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:31,390 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-20T13:26:31,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:31,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:31,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:31,391 ERROR [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:31,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:31,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T13:26:31,405 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/bde3aa2a850c4a7ab7654f9c43ba6f9c 2024-11-20T13:26:31,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:31,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 298 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109251420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:31,439 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/7191dd4ddf19498e936ff7ee457f3ade is 50, key is test_row_0/C:col10/1732109190737/Put/seqid=0 2024-11-20T13:26:31,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742500_1676 (size=12301) 2024-11-20T13:26:31,468 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/7191dd4ddf19498e936ff7ee457f3ade 2024-11-20T13:26:31,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/cf859e4360f4419892f767a4acabc705 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/cf859e4360f4419892f767a4acabc705 2024-11-20T13:26:31,497 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/cf859e4360f4419892f767a4acabc705, entries=150, sequenceid=287, filesize=30.5 K 2024-11-20T13:26:31,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/bde3aa2a850c4a7ab7654f9c43ba6f9c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/bde3aa2a850c4a7ab7654f9c43ba6f9c 2024-11-20T13:26:31,502 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/bde3aa2a850c4a7ab7654f9c43ba6f9c, entries=150, sequenceid=287, filesize=12.0 K 2024-11-20T13:26:31,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/7191dd4ddf19498e936ff7ee457f3ade as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/7191dd4ddf19498e936ff7ee457f3ade 2024-11-20T13:26:31,508 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/7191dd4ddf19498e936ff7ee457f3ade, entries=150, sequenceid=287, filesize=12.0 K 2024-11-20T13:26:31,509 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for b7ff2ddfd4733260af7cbc9b7e7d2218 in 769ms, sequenceid=287, compaction requested=true 2024-11-20T13:26:31,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:31,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7ff2ddfd4733260af7cbc9b7e7d2218:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:26:31,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:26:31,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7ff2ddfd4733260af7cbc9b7e7d2218:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:26:31,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T13:26:31,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7ff2ddfd4733260af7cbc9b7e7d2218:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:26:31,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T13:26:31,512 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:26:31,515 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 
store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:26:31,517 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49400 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:26:31,517 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/C is initiating minor compaction (all files) 2024-11-20T13:26:31,517 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/C in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:31,517 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/6e357dce76c346f4beddacf984aff043, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/6e663b0c8ff247ef8284c7016a32a5be, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/1f7003d2260f4e3b8caba71bc55bd9eb, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/7191dd4ddf19498e936ff7ee457f3ade] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=48.2 K 2024-11-20T13:26:31,518 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e357dce76c346f4beddacf984aff043, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1732109186445 2024-11-20T13:26:31,518 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e663b0c8ff247ef8284c7016a32a5be, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732109187650 2024-11-20T13:26:31,518 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f7003d2260f4e3b8caba71bc55bd9eb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1732109188325 2024-11-20T13:26:31,519 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7191dd4ddf19498e936ff7ee457f3ade, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732109189602 2024-11-20T13:26:31,528 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 125216 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:26:31,528 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/A is initiating minor compaction (all files) 2024-11-20T13:26:31,528 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/A in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:31,528 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/b402942377514915b78f98318f5e0671, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/b6e4da51b5b04e39896067574da7475b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/dc2359e222d843d394e949b17a84975b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/cf859e4360f4419892f767a4acabc705] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=122.3 K 2024-11-20T13:26:31,528 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:31,528 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/b402942377514915b78f98318f5e0671, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/b6e4da51b5b04e39896067574da7475b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/dc2359e222d843d394e949b17a84975b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/cf859e4360f4419892f767a4acabc705] 2024-11-20T13:26:31,528 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting b402942377514915b78f98318f5e0671, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1732109186445 2024-11-20T13:26:31,529 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting b6e4da51b5b04e39896067574da7475b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732109187650 2024-11-20T13:26:31,529 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting dc2359e222d843d394e949b17a84975b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1732109188325 2024-11-20T13:26:31,529 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting cf859e4360f4419892f767a4acabc705, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732109189602 2024-11-20T13:26:31,531 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): b7ff2ddfd4733260af7cbc9b7e7d2218#C#compaction#570 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:31,532 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/a3e4afde61264e9bb1b846558d73388f is 50, key is test_row_0/C:col10/1732109190737/Put/seqid=0 2024-11-20T13:26:31,547 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:31,548 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46739 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-20T13:26:31,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:31,548 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2837): Flushing b7ff2ddfd4733260af7cbc9b7e7d2218 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T13:26:31,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=A 2024-11-20T13:26:31,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:31,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=B 2024-11-20T13:26:31,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:31,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=C 2024-11-20T13:26:31,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:31,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c42d24657bea4dfeaf03ba9f3b2f3502_b7ff2ddfd4733260af7cbc9b7e7d2218 is 50, key is test_row_0/A:col10/1732109190801/Put/seqid=0 2024-11-20T13:26:31,577 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 
2024-11-20T13:26:31,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742501_1677 (size=12983) 2024-11-20T13:26:31,583 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411204602a554e95843bbab87b1b7d65e3488_b7ff2ddfd4733260af7cbc9b7e7d2218 store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:31,586 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411204602a554e95843bbab87b1b7d65e3488_b7ff2ddfd4733260af7cbc9b7e7d2218, store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:31,586 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204602a554e95843bbab87b1b7d65e3488_b7ff2ddfd4733260af7cbc9b7e7d2218 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:31,612 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/a3e4afde61264e9bb1b846558d73388f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/a3e4afde61264e9bb1b846558d73388f 2024-11-20T13:26:31,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742503_1679 (size=4469) 2024-11-20T13:26:31,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742502_1678 (size=12454) 2024-11-20T13:26:31,652 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/C of b7ff2ddfd4733260af7cbc9b7e7d2218 into a3e4afde61264e9bb1b846558d73388f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:26:31,652 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:31,652 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/C, priority=12, startTime=1732109191510; duration=0sec 2024-11-20T13:26:31,652 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:26:31,652 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:C 2024-11-20T13:26:31,652 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T13:26:31,660 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49400 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T13:26:31,660 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/B is initiating minor compaction (all files) 2024-11-20T13:26:31,660 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/B in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:31,660 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/00330457c4454efbb1f9a04b8253f9b6, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/84c1b04fc64d4380b2028190caafdaac, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/524b4fd76fa84dad8410de620eabaaad, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/bde3aa2a850c4a7ab7654f9c43ba6f9c] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=48.2 K 2024-11-20T13:26:31,668 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 00330457c4454efbb1f9a04b8253f9b6, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1732109186445 2024-11-20T13:26:31,668 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84c1b04fc64d4380b2028190caafdaac, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732109187650 2024-11-20T13:26:31,669 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 524b4fd76fa84dad8410de620eabaaad, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1732109188325 2024-11-20T13:26:31,669 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting bde3aa2a850c4a7ab7654f9c43ba6f9c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732109189602 2024-11-20T13:26:31,701 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7ff2ddfd4733260af7cbc9b7e7d2218#B#compaction#573 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:31,701 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/431fdcc7910145c1ac863308db5d18fd is 50, key is test_row_0/B:col10/1732109190737/Put/seqid=0 2024-11-20T13:26:31,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-20T13:26:31,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742504_1680 (size=12983) 2024-11-20T13:26:31,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:31,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. as already flushing 2024-11-20T13:26:32,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:32,033 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7ff2ddfd4733260af7cbc9b7e7d2218#A#compaction#572 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:32,034 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/89208ccbe0ee4356912d244ee9cc895c is 175, key is test_row_0/A:col10/1732109190737/Put/seqid=0 2024-11-20T13:26:32,052 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c42d24657bea4dfeaf03ba9f3b2f3502_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c42d24657bea4dfeaf03ba9f3b2f3502_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:32,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/3545b2bc0c5543deb7c0c5b11ce8ce8a, store: [table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:32,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/3545b2bc0c5543deb7c0c5b11ce8ce8a is 175, key is test_row_0/A:col10/1732109190801/Put/seqid=0 2024-11-20T13:26:32,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:32,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 321 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109252064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:32,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742505_1681 (size=31937) 2024-11-20T13:26:32,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742506_1682 (size=31255) 2024-11-20T13:26:32,077 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=299, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/3545b2bc0c5543deb7c0c5b11ce8ce8a 2024-11-20T13:26:32,094 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/89208ccbe0ee4356912d244ee9cc895c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/89208ccbe0ee4356912d244ee9cc895c 2024-11-20T13:26:32,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/fc68861fd56b4cdab565c928237bead4 is 50, key is test_row_0/B:col10/1732109190801/Put/seqid=0 2024-11-20T13:26:32,104 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/A of b7ff2ddfd4733260af7cbc9b7e7d2218 into 89208ccbe0ee4356912d244ee9cc895c(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:26:32,104 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:32,104 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/A, priority=12, startTime=1732109191510; duration=0sec 2024-11-20T13:26:32,104 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:32,104 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:A 2024-11-20T13:26:32,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742507_1683 (size=12301) 2024-11-20T13:26:32,168 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/431fdcc7910145c1ac863308db5d18fd as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/431fdcc7910145c1ac863308db5d18fd 2024-11-20T13:26:32,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:32,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 323 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109252168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:32,177 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/B of b7ff2ddfd4733260af7cbc9b7e7d2218 into 431fdcc7910145c1ac863308db5d18fd(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:26:32,177 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:32,177 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/B, priority=12, startTime=1732109191510; duration=0sec 2024-11-20T13:26:32,177 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:32,178 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:B 2024-11-20T13:26:32,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:32,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 325 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109252373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:32,532 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/fc68861fd56b4cdab565c928237bead4 2024-11-20T13:26:32,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/02b26de7b8a44bacada07e38d4343f8f is 50, key is test_row_0/C:col10/1732109190801/Put/seqid=0 2024-11-20T13:26:32,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742508_1684 (size=12301) 2024-11-20T13:26:32,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:32,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 327 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109252684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:32,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-20T13:26:33,000 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/02b26de7b8a44bacada07e38d4343f8f 2024-11-20T13:26:33,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/3545b2bc0c5543deb7c0c5b11ce8ce8a as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/3545b2bc0c5543deb7c0c5b11ce8ce8a 2024-11-20T13:26:33,022 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/3545b2bc0c5543deb7c0c5b11ce8ce8a, entries=150, sequenceid=299, filesize=30.5 K 2024-11-20T13:26:33,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/fc68861fd56b4cdab565c928237bead4 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/fc68861fd56b4cdab565c928237bead4 2024-11-20T13:26:33,028 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/fc68861fd56b4cdab565c928237bead4, entries=150, sequenceid=299, filesize=12.0 K 2024-11-20T13:26:33,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/02b26de7b8a44bacada07e38d4343f8f as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/02b26de7b8a44bacada07e38d4343f8f 2024-11-20T13:26:33,049 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/02b26de7b8a44bacada07e38d4343f8f, entries=150, sequenceid=299, filesize=12.0 K 2024-11-20T13:26:33,052 INFO [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for b7ff2ddfd4733260af7cbc9b7e7d2218 in 1504ms, sequenceid=299, compaction requested=false 2024-11-20T13:26:33,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2538): Flush status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:33,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:33,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5ef453f0fbb6:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=182 2024-11-20T13:26:33,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster(4106): Remote procedure done, pid=182 2024-11-20T13:26:33,056 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-11-20T13:26:33,056 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4540 sec 2024-11-20T13:26:33,058 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees in 2.4600 sec 2024-11-20T13:26:33,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:33,197 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7ff2ddfd4733260af7cbc9b7e7d2218 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T13:26:33,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=A 2024-11-20T13:26:33,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:33,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=B 2024-11-20T13:26:33,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:33,198 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=C 2024-11-20T13:26:33,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:33,209 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c90e5fb0910b43d590aaccaeb82991c6_b7ff2ddfd4733260af7cbc9b7e7d2218 is 50, key is test_row_0/A:col10/1732109193191/Put/seqid=0 2024-11-20T13:26:33,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742509_1685 (size=12454) 2024-11-20T13:26:33,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:33,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 338 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109253245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:33,355 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:33,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 340 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109253353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:33,571 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:33,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 342 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109253569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:33,628 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:33,663 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c90e5fb0910b43d590aaccaeb82991c6_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c90e5fb0910b43d590aaccaeb82991c6_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:33,664 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/eb7c7b5caff1452fb364b6c0d3a365c0, store: [table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:33,665 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/eb7c7b5caff1452fb364b6c0d3a365c0 is 175, key is test_row_0/A:col10/1732109193191/Put/seqid=0 2024-11-20T13:26:33,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742510_1686 (size=31255) 2024-11-20T13:26:33,708 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=327, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/eb7c7b5caff1452fb364b6c0d3a365c0 2024-11-20T13:26:33,735 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/01ecbca2fd734af98d35f7b010177165 is 50, key is test_row_0/B:col10/1732109193191/Put/seqid=0 2024-11-20T13:26:33,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742511_1687 
(size=12301) 2024-11-20T13:26:33,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:33,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 344 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109253880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:33,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:33,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32872 deadline: 1732109253960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:33,963 DEBUG [Thread-2632 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18198 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:26:33,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:33,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732109253963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:33,964 DEBUG [Thread-2636 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18200 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:26:33,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:33,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32898 deadline: 1732109253989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:33,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:33,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32818 deadline: 1732109253990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:33,992 DEBUG [Thread-2630 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18230 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:26:33,994 DEBUG [Thread-2628 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18231 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., hostname=5ef453f0fbb6,46739,1732109006137, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T13:26:34,184 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/01ecbca2fd734af98d35f7b010177165 2024-11-20T13:26:34,257 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/573dcc9dc9e743899b33d0211f4881ea is 50, key is test_row_0/C:col10/1732109193191/Put/seqid=0 2024-11-20T13:26:34,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742512_1688 (size=12301) 2024-11-20T13:26:34,312 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/573dcc9dc9e743899b33d0211f4881ea 2024-11-20T13:26:34,360 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/eb7c7b5caff1452fb364b6c0d3a365c0 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/eb7c7b5caff1452fb364b6c0d3a365c0 2024-11-20T13:26:34,388 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/eb7c7b5caff1452fb364b6c0d3a365c0, entries=150, sequenceid=327, filesize=30.5 K 2024-11-20T13:26:34,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/01ecbca2fd734af98d35f7b010177165 as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/01ecbca2fd734af98d35f7b010177165 2024-11-20T13:26:34,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T13:26:34,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46739 {}] ipc.CallRunner(138): callId: 346 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32874 deadline: 1732109254392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:34,402 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/01ecbca2fd734af98d35f7b010177165, entries=150, sequenceid=327, filesize=12.0 K 2024-11-20T13:26:34,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/573dcc9dc9e743899b33d0211f4881ea as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/573dcc9dc9e743899b33d0211f4881ea 2024-11-20T13:26:34,432 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/573dcc9dc9e743899b33d0211f4881ea, entries=150, sequenceid=327, filesize=12.0 K 2024-11-20T13:26:34,435 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for b7ff2ddfd4733260af7cbc9b7e7d2218 in 1238ms, sequenceid=327, compaction requested=true 2024-11-20T13:26:34,435 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2538): Flush status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:34,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7ff2ddfd4733260af7cbc9b7e7d2218:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T13:26:34,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:34,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7ff2ddfd4733260af7cbc9b7e7d2218:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T13:26:34,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:26:34,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7ff2ddfd4733260af7cbc9b7e7d2218:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T13:26:34,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T13:26:34,435 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:34,436 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:34,440 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94447 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:34,440 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/A is initiating minor compaction (all files) 2024-11-20T13:26:34,440 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/A in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:34,441 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/89208ccbe0ee4356912d244ee9cc895c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/3545b2bc0c5543deb7c0c5b11ce8ce8a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/eb7c7b5caff1452fb364b6c0d3a365c0] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=92.2 K 2024-11-20T13:26:34,441 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:34,441 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. files: [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/89208ccbe0ee4356912d244ee9cc895c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/3545b2bc0c5543deb7c0c5b11ce8ce8a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/eb7c7b5caff1452fb364b6c0d3a365c0] 2024-11-20T13:26:34,442 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 89208ccbe0ee4356912d244ee9cc895c, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732109189602 2024-11-20T13:26:34,442 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting 3545b2bc0c5543deb7c0c5b11ce8ce8a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1732109190774 2024-11-20T13:26:34,443 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] compactions.Compactor(224): Compacting eb7c7b5caff1452fb364b6c0d3a365c0, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732109192047 2024-11-20T13:26:34,446 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:34,446 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/B is initiating minor compaction (all files) 2024-11-20T13:26:34,446 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/B in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:34,446 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/431fdcc7910145c1ac863308db5d18fd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/fc68861fd56b4cdab565c928237bead4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/01ecbca2fd734af98d35f7b010177165] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=36.7 K 2024-11-20T13:26:34,452 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 431fdcc7910145c1ac863308db5d18fd, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732109189602 2024-11-20T13:26:34,456 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc68861fd56b4cdab565c928237bead4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1732109190774 2024-11-20T13:26:34,456 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01ecbca2fd734af98d35f7b010177165, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732109192047 2024-11-20T13:26:34,457 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:34,464 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411206a6cace9c7dd423cb9e68020b4b7e719_b7ff2ddfd4733260af7cbc9b7e7d2218 store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:34,466 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411206a6cace9c7dd423cb9e68020b4b7e719_b7ff2ddfd4733260af7cbc9b7e7d2218, store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:34,466 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206a6cace9c7dd423cb9e68020b4b7e719_b7ff2ddfd4733260af7cbc9b7e7d2218 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:34,474 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7ff2ddfd4733260af7cbc9b7e7d2218#B#compaction#580 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:34,474 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/59ed15a8966c44bd8a6acd25fe50d775 is 50, key is test_row_0/B:col10/1732109193191/Put/seqid=0 2024-11-20T13:26:34,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742513_1689 (size=4469) 2024-11-20T13:26:34,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742514_1690 (size=13085) 2024-11-20T13:26:34,615 DEBUG [Thread-2641 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x239fc289 to 127.0.0.1:53074 2024-11-20T13:26:34,615 DEBUG [Thread-2641 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:34,616 DEBUG [Thread-2639 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e926f6c to 127.0.0.1:53074 2024-11-20T13:26:34,617 DEBUG [Thread-2639 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:34,617 DEBUG [Thread-2647 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4299ae8d to 127.0.0.1:53074 2024-11-20T13:26:34,617 DEBUG [Thread-2647 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:34,617 DEBUG [Thread-2643 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x26af975e to 127.0.0.1:53074 2024-11-20T13:26:34,617 DEBUG [Thread-2643 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:34,619 DEBUG [Thread-2645 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x645da910 to 127.0.0.1:53074 2024-11-20T13:26:34,619 DEBUG [Thread-2645 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:34,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-20T13:26:34,713 INFO [Thread-2638 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 181 completed 2024-11-20T13:26:34,899 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7ff2ddfd4733260af7cbc9b7e7d2218#A#compaction#579 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:34,900 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/36e46c7c18014029ad823c80daddfbc1 is 175, key is test_row_0/A:col10/1732109193191/Put/seqid=0 2024-11-20T13:26:34,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742515_1691 (size=32039) 2024-11-20T13:26:34,917 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/59ed15a8966c44bd8a6acd25fe50d775 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/59ed15a8966c44bd8a6acd25fe50d775 2024-11-20T13:26:34,922 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/B of b7ff2ddfd4733260af7cbc9b7e7d2218 into 59ed15a8966c44bd8a6acd25fe50d775(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:26:34,922 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:34,922 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/B, priority=13, startTime=1732109194435; duration=0sec 2024-11-20T13:26:34,922 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T13:26:34,922 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:B 2024-11-20T13:26:34,922 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T13:26:34,923 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T13:26:34,923 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1540): b7ff2ddfd4733260af7cbc9b7e7d2218/C is initiating minor compaction (all files) 2024-11-20T13:26:34,923 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7ff2ddfd4733260af7cbc9b7e7d2218/C in TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:34,923 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/a3e4afde61264e9bb1b846558d73388f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/02b26de7b8a44bacada07e38d4343f8f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/573dcc9dc9e743899b33d0211f4881ea] into tmpdir=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp, totalSize=36.7 K 2024-11-20T13:26:34,924 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3e4afde61264e9bb1b846558d73388f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732109189602 2024-11-20T13:26:34,924 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 02b26de7b8a44bacada07e38d4343f8f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1732109190774 2024-11-20T13:26:34,924 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] compactions.Compactor(224): Compacting 573dcc9dc9e743899b33d0211f4881ea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732109192047 2024-11-20T13:26:34,935 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7ff2ddfd4733260af7cbc9b7e7d2218#C#compaction#581 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T13:26:34,935 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/30ef9d827ec749c2b041d40c2b261b71 is 50, key is test_row_0/C:col10/1732109193191/Put/seqid=0 2024-11-20T13:26:34,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742516_1692 (size=13085) 2024-11-20T13:26:35,309 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/36e46c7c18014029ad823c80daddfbc1 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/36e46c7c18014029ad823c80daddfbc1 2024-11-20T13:26:35,312 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/A of b7ff2ddfd4733260af7cbc9b7e7d2218 into 36e46c7c18014029ad823c80daddfbc1(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T13:26:35,312 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:35,312 INFO [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/A, priority=13, startTime=1732109194435; duration=0sec 2024-11-20T13:26:35,313 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:35,313 DEBUG [RS:0;5ef453f0fbb6:46739-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:A 2024-11-20T13:26:35,349 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/30ef9d827ec749c2b041d40c2b261b71 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/30ef9d827ec749c2b041d40c2b261b71 2024-11-20T13:26:35,352 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7ff2ddfd4733260af7cbc9b7e7d2218/C of b7ff2ddfd4733260af7cbc9b7e7d2218 into 30ef9d827ec749c2b041d40c2b261b71(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T13:26:35,352 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:35,352 INFO [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218., storeName=b7ff2ddfd4733260af7cbc9b7e7d2218/C, priority=13, startTime=1732109194435; duration=0sec 2024-11-20T13:26:35,353 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T13:26:35,353 DEBUG [RS:0;5ef453f0fbb6:46739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7ff2ddfd4733260af7cbc9b7e7d2218:C 2024-11-20T13:26:35,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46739 {}] regionserver.HRegion(8581): Flush requested on b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:35,404 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7ff2ddfd4733260af7cbc9b7e7d2218 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T13:26:35,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=A 2024-11-20T13:26:35,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:35,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=B 2024-11-20T13:26:35,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping 
pipeline suffix; before=1, new segment=null 2024-11-20T13:26:35,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=C 2024-11-20T13:26:35,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:35,405 DEBUG [Thread-2634 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x459417c6 to 127.0.0.1:53074 2024-11-20T13:26:35,405 DEBUG [Thread-2634 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:35,411 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205db9713240cb46029ba0ab5afd51bd29_b7ff2ddfd4733260af7cbc9b7e7d2218 is 50, key is test_row_0/A:col10/1732109193232/Put/seqid=0 2024-11-20T13:26:35,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742517_1693 (size=12454) 2024-11-20T13:26:35,816 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:35,820 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205db9713240cb46029ba0ab5afd51bd29_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205db9713240cb46029ba0ab5afd51bd29_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:35,821 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/a3d1a63feb02483886049d460bd6356d, store: [table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:35,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/a3d1a63feb02483886049d460bd6356d is 175, key is test_row_0/A:col10/1732109193232/Put/seqid=0 2024-11-20T13:26:35,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742518_1694 (size=31255) 2024-11-20T13:26:36,230 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=342, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/a3d1a63feb02483886049d460bd6356d 2024-11-20T13:26:36,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/9bff8a9673dc427bb3abfe4901b74a32 is 50, key is test_row_0/B:col10/1732109193232/Put/seqid=0 
2024-11-20T13:26:36,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742519_1695 (size=12301) 2024-11-20T13:26:36,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/9bff8a9673dc427bb3abfe4901b74a32 2024-11-20T13:26:36,253 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/4dd7509b877b4ef186b57798d5a1c756 is 50, key is test_row_0/C:col10/1732109193232/Put/seqid=0 2024-11-20T13:26:36,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742520_1696 (size=12301) 2024-11-20T13:26:36,659 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/4dd7509b877b4ef186b57798d5a1c756 2024-11-20T13:26:36,663 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/a3d1a63feb02483886049d460bd6356d as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/a3d1a63feb02483886049d460bd6356d 2024-11-20T13:26:36,667 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/a3d1a63feb02483886049d460bd6356d, entries=150, sequenceid=342, filesize=30.5 K 2024-11-20T13:26:36,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/9bff8a9673dc427bb3abfe4901b74a32 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/9bff8a9673dc427bb3abfe4901b74a32 2024-11-20T13:26:36,670 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/9bff8a9673dc427bb3abfe4901b74a32, entries=150, sequenceid=342, filesize=12.0 K 2024-11-20T13:26:36,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/4dd7509b877b4ef186b57798d5a1c756 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/4dd7509b877b4ef186b57798d5a1c756 2024-11-20T13:26:36,674 INFO [MemStoreFlusher.0 
{}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/4dd7509b877b4ef186b57798d5a1c756, entries=150, sequenceid=342, filesize=12.0 K 2024-11-20T13:26:36,675 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=0 B/0 for b7ff2ddfd4733260af7cbc9b7e7d2218 in 1271ms, sequenceid=342, compaction requested=false 2024-11-20T13:26:36,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:43,967 DEBUG [Thread-2632 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1dae921d to 127.0.0.1:53074 2024-11-20T13:26:43,967 DEBUG [Thread-2632 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:43,996 DEBUG [Thread-2630 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x787b683a to 127.0.0.1:53074 2024-11-20T13:26:43,996 DEBUG [Thread-2630 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:44,001 DEBUG [Thread-2636 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53eb3c7c to 127.0.0.1:53074 2024-11-20T13:26:44,001 DEBUG [Thread-2636 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:44,072 DEBUG [Thread-2628 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x71d2b4d3 to 127.0.0.1:53074 2024-11-20T13:26:44,072 DEBUG [Thread-2628 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:44,072 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-20T13:26:44,073 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 15 2024-11-20T13:26:44,073 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 2 2024-11-20T13:26:44,073 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 9 2024-11-20T13:26:44,073 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 229 2024-11-20T13:26:44,073 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 11 2024-11-20T13:26:44,073 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T13:26:44,073 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4708 2024-11-20T13:26:44,073 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4788 2024-11-20T13:26:44,073 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4609 2024-11-20T13:26:44,073 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4715 2024-11-20T13:26:44,073 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4813 2024-11-20T13:26:44,073 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T13:26:44,073 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T13:26:44,073 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x41799513 to 127.0.0.1:53074 2024-11-20T13:26:44,073 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:44,073 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T13:26:44,074 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 
2024-11-20T13:26:44,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=183, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T13:26:44,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-20T13:26:44,076 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109204076"}]},"ts":"1732109204076"} 2024-11-20T13:26:44,077 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T13:26:44,079 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T13:26:44,080 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T13:26:44,081 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=185, ppid=184, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b7ff2ddfd4733260af7cbc9b7e7d2218, UNASSIGN}] 2024-11-20T13:26:44,082 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=185, ppid=184, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b7ff2ddfd4733260af7cbc9b7e7d2218, UNASSIGN 2024-11-20T13:26:44,082 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=185 updating hbase:meta row=b7ff2ddfd4733260af7cbc9b7e7d2218, regionState=CLOSING, regionLocation=5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:44,083 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T13:26:44,083 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE; CloseRegionProcedure b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137}] 2024-11-20T13:26:44,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-20T13:26:44,235 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:44,235 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] handler.UnassignRegionHandler(124): Close b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:44,236 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T13:26:44,236 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegion(1681): Closing b7ff2ddfd4733260af7cbc9b7e7d2218, disabling compactions & flushes 2024-11-20T13:26:44,236 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 
2024-11-20T13:26:44,236 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:44,236 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. after waiting 0 ms 2024-11-20T13:26:44,236 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:44,236 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegion(2837): Flushing b7ff2ddfd4733260af7cbc9b7e7d2218 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T13:26:44,236 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=A 2024-11-20T13:26:44,236 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:44,236 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=B 2024-11-20T13:26:44,236 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:44,237 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7ff2ddfd4733260af7cbc9b7e7d2218, store=C 2024-11-20T13:26:44,237 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T13:26:44,243 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120400b47301f834f26a8c3f93fa65e16e1_b7ff2ddfd4733260af7cbc9b7e7d2218 is 50, key is test_row_0/A:col10/1732109204071/Put/seqid=0 2024-11-20T13:26:44,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742521_1697 (size=9914) 2024-11-20T13:26:44,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-20T13:26:44,650 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T13:26:44,653 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120400b47301f834f26a8c3f93fa65e16e1_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120400b47301f834f26a8c3f93fa65e16e1_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:44,654 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/0db085d919dc44c3a1dc52803412a470, store: [table=TestAcidGuarantees family=A region=b7ff2ddfd4733260af7cbc9b7e7d2218] 2024-11-20T13:26:44,654 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/0db085d919dc44c3a1dc52803412a470 is 175, key is test_row_0/A:col10/1732109204071/Put/seqid=0 2024-11-20T13:26:44,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742522_1698 (size=22561) 2024-11-20T13:26:44,658 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=349, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/0db085d919dc44c3a1dc52803412a470 2024-11-20T13:26:44,662 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/39d6eae5389440f7a8af28e6a6ca6a85 is 50, key is test_row_0/B:col10/1732109204071/Put/seqid=0 2024-11-20T13:26:44,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742523_1699 (size=9857) 2024-11-20T13:26:44,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-20T13:26:45,067 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/39d6eae5389440f7a8af28e6a6ca6a85 2024-11-20T13:26:45,073 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/d1c3d36e5ddb4ed7af51cec8f385aa6c is 50, key is test_row_0/C:col10/1732109204071/Put/seqid=0 
2024-11-20T13:26:45,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742524_1700 (size=9857) 2024-11-20T13:26:45,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-20T13:26:45,477 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/d1c3d36e5ddb4ed7af51cec8f385aa6c 2024-11-20T13:26:45,481 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/A/0db085d919dc44c3a1dc52803412a470 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/0db085d919dc44c3a1dc52803412a470 2024-11-20T13:26:45,484 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/0db085d919dc44c3a1dc52803412a470, entries=100, sequenceid=349, filesize=22.0 K 2024-11-20T13:26:45,485 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/B/39d6eae5389440f7a8af28e6a6ca6a85 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/39d6eae5389440f7a8af28e6a6ca6a85 2024-11-20T13:26:45,487 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/39d6eae5389440f7a8af28e6a6ca6a85, entries=100, sequenceid=349, filesize=9.6 K 2024-11-20T13:26:45,488 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/.tmp/C/d1c3d36e5ddb4ed7af51cec8f385aa6c as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/d1c3d36e5ddb4ed7af51cec8f385aa6c 2024-11-20T13:26:45,491 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/d1c3d36e5ddb4ed7af51cec8f385aa6c, entries=100, sequenceid=349, 
filesize=9.6 K 2024-11-20T13:26:45,491 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for b7ff2ddfd4733260af7cbc9b7e7d2218 in 1255ms, sequenceid=349, compaction requested=true 2024-11-20T13:26:45,492 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/212bc107bff14154b328eaa3be9d6064, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/0e8e60c701ae4f9dbabee68d43c4eb7a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/42bc632d913543d487211e94e14c8dda, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/cb33cf42a50240eaa2e227c098479e3d, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/b0c3265b374a4d929c65a0dfe290166f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/1992484778384fd1bb6a2e46c0855600, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/80f61552d7e3414faa7eb21cade93e6b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/76c508b03f5843d49b3e983f3513f8be, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/2c69e04bc81c44448839f0f669e40c66, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/cb650402e44c4dc08033292bdbc713fa, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/bbf9fd763f824ccd9d45112765229797, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/dca3313e6a854d228ba5210c1bf6babd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/6d6c3c76874f4581a329e6dbb3e739a6, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/eb4d3d102b8143499fc1084eea2cc481, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/ce3edbf269b54b2db5bcf7d05b907c16, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/a6f2fe1ce2844c33a5228fb00ae04e05, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/b402942377514915b78f98318f5e0671, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/b6e4da51b5b04e39896067574da7475b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/dc2359e222d843d394e949b17a84975b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/89208ccbe0ee4356912d244ee9cc895c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/cf859e4360f4419892f767a4acabc705, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/3545b2bc0c5543deb7c0c5b11ce8ce8a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/eb7c7b5caff1452fb364b6c0d3a365c0] to archive 2024-11-20T13:26:45,493 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T13:26:45,494 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/212bc107bff14154b328eaa3be9d6064 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/212bc107bff14154b328eaa3be9d6064 2024-11-20T13:26:45,495 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/0e8e60c701ae4f9dbabee68d43c4eb7a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/0e8e60c701ae4f9dbabee68d43c4eb7a 2024-11-20T13:26:45,496 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/42bc632d913543d487211e94e14c8dda to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/42bc632d913543d487211e94e14c8dda 2024-11-20T13:26:45,497 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/cb33cf42a50240eaa2e227c098479e3d to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/cb33cf42a50240eaa2e227c098479e3d 2024-11-20T13:26:45,498 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/b0c3265b374a4d929c65a0dfe290166f to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/b0c3265b374a4d929c65a0dfe290166f 2024-11-20T13:26:45,499 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/1992484778384fd1bb6a2e46c0855600 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/1992484778384fd1bb6a2e46c0855600 2024-11-20T13:26:45,500 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/80f61552d7e3414faa7eb21cade93e6b to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/80f61552d7e3414faa7eb21cade93e6b 2024-11-20T13:26:45,501 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/76c508b03f5843d49b3e983f3513f8be to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/76c508b03f5843d49b3e983f3513f8be 2024-11-20T13:26:45,502 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/2c69e04bc81c44448839f0f669e40c66 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/2c69e04bc81c44448839f0f669e40c66 2024-11-20T13:26:45,503 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/cb650402e44c4dc08033292bdbc713fa to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/cb650402e44c4dc08033292bdbc713fa 2024-11-20T13:26:45,504 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/bbf9fd763f824ccd9d45112765229797 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/bbf9fd763f824ccd9d45112765229797 2024-11-20T13:26:45,505 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/dca3313e6a854d228ba5210c1bf6babd to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/dca3313e6a854d228ba5210c1bf6babd 2024-11-20T13:26:45,506 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/6d6c3c76874f4581a329e6dbb3e739a6 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/6d6c3c76874f4581a329e6dbb3e739a6 2024-11-20T13:26:45,507 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/eb4d3d102b8143499fc1084eea2cc481 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/eb4d3d102b8143499fc1084eea2cc481 2024-11-20T13:26:45,508 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/ce3edbf269b54b2db5bcf7d05b907c16 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/ce3edbf269b54b2db5bcf7d05b907c16 2024-11-20T13:26:45,509 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/a6f2fe1ce2844c33a5228fb00ae04e05 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/a6f2fe1ce2844c33a5228fb00ae04e05 2024-11-20T13:26:45,511 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/b402942377514915b78f98318f5e0671 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/b402942377514915b78f98318f5e0671 2024-11-20T13:26:45,512 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/b6e4da51b5b04e39896067574da7475b to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/b6e4da51b5b04e39896067574da7475b 2024-11-20T13:26:45,513 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/dc2359e222d843d394e949b17a84975b to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/dc2359e222d843d394e949b17a84975b 2024-11-20T13:26:45,514 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/89208ccbe0ee4356912d244ee9cc895c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/89208ccbe0ee4356912d244ee9cc895c 2024-11-20T13:26:45,515 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/cf859e4360f4419892f767a4acabc705 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/cf859e4360f4419892f767a4acabc705 2024-11-20T13:26:45,515 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/3545b2bc0c5543deb7c0c5b11ce8ce8a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/3545b2bc0c5543deb7c0c5b11ce8ce8a 2024-11-20T13:26:45,516 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/eb7c7b5caff1452fb364b6c0d3a365c0 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/eb7c7b5caff1452fb364b6c0d3a365c0 2024-11-20T13:26:45,517 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/1fefda6b3b1f4a339eab862c44d6af83, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/0fa1519ca8f340b0bc2f03d34db909a3, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/09ec13b851d648c3a95966f7d847f0f8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/0d700bf0064d4643878ad32bd8be5fb4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/c38c99d20df44843a7f7f0e2417679a8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/268b90b67dc149eb8d746ccad9beabcb, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/ae22cf0c2b524f3e891879b599dc85ff, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/58b2416300d743c0a76ebe8fb78fc2e6, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/8b4382c73a564866b60a72e95e3e318a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/7acac3bf3eac4c5bb1a067f4f6a4f694, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/5462bffc26564c56a86e8ac4c0e89ffe, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/cd47528d948344f28d3cd703fa7a5772, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/691287860517458288b94a06a7a32bb8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/8dec30daf9414ce0899b4dcadc125361, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/2d46091538a84d349ab8a07024450a3a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/00330457c4454efbb1f9a04b8253f9b6, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/41f89a947e2b4fc69646772c3d828117, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/84c1b04fc64d4380b2028190caafdaac, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/524b4fd76fa84dad8410de620eabaaad, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/431fdcc7910145c1ac863308db5d18fd, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/bde3aa2a850c4a7ab7654f9c43ba6f9c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/fc68861fd56b4cdab565c928237bead4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/01ecbca2fd734af98d35f7b010177165] to archive 2024-11-20T13:26:45,518 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T13:26:45,519 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/1fefda6b3b1f4a339eab862c44d6af83 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/1fefda6b3b1f4a339eab862c44d6af83 2024-11-20T13:26:45,520 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/0fa1519ca8f340b0bc2f03d34db909a3 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/0fa1519ca8f340b0bc2f03d34db909a3 2024-11-20T13:26:45,521 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/09ec13b851d648c3a95966f7d847f0f8 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/09ec13b851d648c3a95966f7d847f0f8 2024-11-20T13:26:45,522 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/0d700bf0064d4643878ad32bd8be5fb4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/0d700bf0064d4643878ad32bd8be5fb4 2024-11-20T13:26:45,523 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/c38c99d20df44843a7f7f0e2417679a8 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/c38c99d20df44843a7f7f0e2417679a8 2024-11-20T13:26:45,524 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/268b90b67dc149eb8d746ccad9beabcb to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/268b90b67dc149eb8d746ccad9beabcb 2024-11-20T13:26:45,525 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/ae22cf0c2b524f3e891879b599dc85ff to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/ae22cf0c2b524f3e891879b599dc85ff 2024-11-20T13:26:45,526 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/58b2416300d743c0a76ebe8fb78fc2e6 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/58b2416300d743c0a76ebe8fb78fc2e6 2024-11-20T13:26:45,527 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/8b4382c73a564866b60a72e95e3e318a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/8b4382c73a564866b60a72e95e3e318a 2024-11-20T13:26:45,527 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/7acac3bf3eac4c5bb1a067f4f6a4f694 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/7acac3bf3eac4c5bb1a067f4f6a4f694 2024-11-20T13:26:45,528 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/5462bffc26564c56a86e8ac4c0e89ffe to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/5462bffc26564c56a86e8ac4c0e89ffe 2024-11-20T13:26:45,529 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/cd47528d948344f28d3cd703fa7a5772 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/cd47528d948344f28d3cd703fa7a5772 2024-11-20T13:26:45,530 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/691287860517458288b94a06a7a32bb8 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/691287860517458288b94a06a7a32bb8 2024-11-20T13:26:45,531 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/8dec30daf9414ce0899b4dcadc125361 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/8dec30daf9414ce0899b4dcadc125361 2024-11-20T13:26:45,532 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/2d46091538a84d349ab8a07024450a3a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/2d46091538a84d349ab8a07024450a3a 2024-11-20T13:26:45,533 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/00330457c4454efbb1f9a04b8253f9b6 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/00330457c4454efbb1f9a04b8253f9b6 2024-11-20T13:26:45,534 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/41f89a947e2b4fc69646772c3d828117 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/41f89a947e2b4fc69646772c3d828117 2024-11-20T13:26:45,535 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/84c1b04fc64d4380b2028190caafdaac to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/84c1b04fc64d4380b2028190caafdaac 2024-11-20T13:26:45,536 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/524b4fd76fa84dad8410de620eabaaad to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/524b4fd76fa84dad8410de620eabaaad 2024-11-20T13:26:45,537 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/431fdcc7910145c1ac863308db5d18fd to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/431fdcc7910145c1ac863308db5d18fd 2024-11-20T13:26:45,538 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/bde3aa2a850c4a7ab7654f9c43ba6f9c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/bde3aa2a850c4a7ab7654f9c43ba6f9c 2024-11-20T13:26:45,539 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/fc68861fd56b4cdab565c928237bead4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/fc68861fd56b4cdab565c928237bead4 2024-11-20T13:26:45,540 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/01ecbca2fd734af98d35f7b010177165 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/01ecbca2fd734af98d35f7b010177165 2024-11-20T13:26:45,543 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/7dc34990f10f4981b3f34bd78a44efb7, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/d792969311724638bf1edeb84404d2f0, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/b5607c7f31e042ad9a5f78e3742e4fc9, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/3a8475deab2b4af984a00e1ccd17ceb4, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/494109b7d42f451abe2ae634bb21d9ea, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/a596fbb7e2654d9289065b947f21e43a, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/cc6365c1185244b48ce06b637786a326, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/c05d3a581c044f00bde72e8ef50eca64, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/a8cfb4a2eb804f87b40638ea1f2f74ee, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/58f12757724b4740b90bbe5ece04f77c, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/8168b7bdb550462bb0a31c048253a1ef, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/f155b9c1ec7d4559af60ae3f07fff30f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/8fc2d8a222b1415d93cd5a1affa8250b, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/6141f85e7dff414498fcc8003a28a9df, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/7aaf53f4224b47f0ab5177b8ed4d1de8, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/6e357dce76c346f4beddacf984aff043, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/22ad8b2225db4ab99c58fe81e9a1dfba, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/6e663b0c8ff247ef8284c7016a32a5be, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/1f7003d2260f4e3b8caba71bc55bd9eb, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/a3e4afde61264e9bb1b846558d73388f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/7191dd4ddf19498e936ff7ee457f3ade, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/02b26de7b8a44bacada07e38d4343f8f, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/573dcc9dc9e743899b33d0211f4881ea] to archive 2024-11-20T13:26:45,543 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T13:26:45,545 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/7dc34990f10f4981b3f34bd78a44efb7 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/7dc34990f10f4981b3f34bd78a44efb7 2024-11-20T13:26:45,546 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/d792969311724638bf1edeb84404d2f0 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/d792969311724638bf1edeb84404d2f0 2024-11-20T13:26:45,547 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/b5607c7f31e042ad9a5f78e3742e4fc9 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/b5607c7f31e042ad9a5f78e3742e4fc9 2024-11-20T13:26:45,548 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/3a8475deab2b4af984a00e1ccd17ceb4 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/3a8475deab2b4af984a00e1ccd17ceb4 2024-11-20T13:26:45,549 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/494109b7d42f451abe2ae634bb21d9ea to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/494109b7d42f451abe2ae634bb21d9ea 2024-11-20T13:26:45,550 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/a596fbb7e2654d9289065b947f21e43a to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/a596fbb7e2654d9289065b947f21e43a 2024-11-20T13:26:45,551 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/cc6365c1185244b48ce06b637786a326 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/cc6365c1185244b48ce06b637786a326 2024-11-20T13:26:45,552 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/c05d3a581c044f00bde72e8ef50eca64 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/c05d3a581c044f00bde72e8ef50eca64 2024-11-20T13:26:45,553 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/a8cfb4a2eb804f87b40638ea1f2f74ee to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/a8cfb4a2eb804f87b40638ea1f2f74ee 2024-11-20T13:26:45,554 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/58f12757724b4740b90bbe5ece04f77c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/58f12757724b4740b90bbe5ece04f77c 2024-11-20T13:26:45,555 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/8168b7bdb550462bb0a31c048253a1ef to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/8168b7bdb550462bb0a31c048253a1ef 2024-11-20T13:26:45,556 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/f155b9c1ec7d4559af60ae3f07fff30f to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/f155b9c1ec7d4559af60ae3f07fff30f 2024-11-20T13:26:45,557 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/8fc2d8a222b1415d93cd5a1affa8250b to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/8fc2d8a222b1415d93cd5a1affa8250b 2024-11-20T13:26:45,558 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/6141f85e7dff414498fcc8003a28a9df to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/6141f85e7dff414498fcc8003a28a9df 2024-11-20T13:26:45,559 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/7aaf53f4224b47f0ab5177b8ed4d1de8 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/7aaf53f4224b47f0ab5177b8ed4d1de8 2024-11-20T13:26:45,560 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/6e357dce76c346f4beddacf984aff043 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/6e357dce76c346f4beddacf984aff043 2024-11-20T13:26:45,561 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/22ad8b2225db4ab99c58fe81e9a1dfba to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/22ad8b2225db4ab99c58fe81e9a1dfba 2024-11-20T13:26:45,562 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/6e663b0c8ff247ef8284c7016a32a5be to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/6e663b0c8ff247ef8284c7016a32a5be 2024-11-20T13:26:45,563 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/1f7003d2260f4e3b8caba71bc55bd9eb to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/1f7003d2260f4e3b8caba71bc55bd9eb 2024-11-20T13:26:45,564 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/a3e4afde61264e9bb1b846558d73388f to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/a3e4afde61264e9bb1b846558d73388f 2024-11-20T13:26:45,565 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/7191dd4ddf19498e936ff7ee457f3ade to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/7191dd4ddf19498e936ff7ee457f3ade 2024-11-20T13:26:45,566 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/02b26de7b8a44bacada07e38d4343f8f to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/02b26de7b8a44bacada07e38d4343f8f 2024-11-20T13:26:45,567 DEBUG [StoreCloser-TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/573dcc9dc9e743899b33d0211f4881ea to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/573dcc9dc9e743899b33d0211f4881ea 2024-11-20T13:26:45,571 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/recovered.edits/352.seqid, newMaxSeqId=352, maxSeqId=4 2024-11-20T13:26:45,572 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218. 2024-11-20T13:26:45,572 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegion(1635): Region close journal for b7ff2ddfd4733260af7cbc9b7e7d2218: 2024-11-20T13:26:45,573 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] handler.UnassignRegionHandler(170): Closed b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:45,573 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=185 updating hbase:meta row=b7ff2ddfd4733260af7cbc9b7e7d2218, regionState=CLOSED 2024-11-20T13:26:45,575 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=186, resume processing ppid=185 2024-11-20T13:26:45,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=186, ppid=185, state=SUCCESS; CloseRegionProcedure b7ff2ddfd4733260af7cbc9b7e7d2218, server=5ef453f0fbb6,46739,1732109006137 in 1.4910 sec 2024-11-20T13:26:45,577 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=185, resume processing ppid=184 2024-11-20T13:26:45,577 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, ppid=184, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b7ff2ddfd4733260af7cbc9b7e7d2218, UNASSIGN in 1.4940 sec 2024-11-20T13:26:45,578 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=183 2024-11-20T13:26:45,578 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=183, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4970 sec 2024-11-20T13:26:45,579 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732109205579"}]},"ts":"1732109205579"} 2024-11-20T13:26:45,580 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T13:26:45,582 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T13:26:45,583 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5090 sec 2024-11-20T13:26:45,751 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T13:26:46,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-20T13:26:46,180 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 183 completed 2024-11-20T13:26:46,180 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T13:26:46,181 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] procedure2.ProcedureExecutor(1098): Stored pid=187, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:26:46,182 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=187, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:26:46,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-11-20T13:26:46,182 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=187, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:26:46,184 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,186 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A, FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B, FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C, FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/recovered.edits] 2024-11-20T13:26:46,187 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/0db085d919dc44c3a1dc52803412a470 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/0db085d919dc44c3a1dc52803412a470 2024-11-20T13:26:46,188 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/36e46c7c18014029ad823c80daddfbc1 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/36e46c7c18014029ad823c80daddfbc1 2024-11-20T13:26:46,189 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/a3d1a63feb02483886049d460bd6356d to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/A/a3d1a63feb02483886049d460bd6356d 2024-11-20T13:26:46,190 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/39d6eae5389440f7a8af28e6a6ca6a85 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/39d6eae5389440f7a8af28e6a6ca6a85 2024-11-20T13:26:46,191 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/59ed15a8966c44bd8a6acd25fe50d775 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/59ed15a8966c44bd8a6acd25fe50d775 2024-11-20T13:26:46,192 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/9bff8a9673dc427bb3abfe4901b74a32 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/B/9bff8a9673dc427bb3abfe4901b74a32 2024-11-20T13:26:46,194 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/30ef9d827ec749c2b041d40c2b261b71 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/30ef9d827ec749c2b041d40c2b261b71 2024-11-20T13:26:46,195 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/4dd7509b877b4ef186b57798d5a1c756 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/4dd7509b877b4ef186b57798d5a1c756 2024-11-20T13:26:46,196 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/d1c3d36e5ddb4ed7af51cec8f385aa6c to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/C/d1c3d36e5ddb4ed7af51cec8f385aa6c 2024-11-20T13:26:46,199 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/recovered.edits/352.seqid to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218/recovered.edits/352.seqid 2024-11-20T13:26:46,199 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/default/TestAcidGuarantees/b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,199 DEBUG [PEWorker-4 {}] 
procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T13:26:46,200 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T13:26:46,201 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T13:26:46,204 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200b47572ae3434e05be0acb873179e19e_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200b47572ae3434e05be0acb873179e19e_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,205 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201769fa3330fc4ffe9c39bc635b7ec461_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201769fa3330fc4ffe9c39bc635b7ec461_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,206 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120400b47301f834f26a8c3f93fa65e16e1_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120400b47301f834f26a8c3f93fa65e16e1_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,212 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112054eccaf076e142e0a98aa5207f026074_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112054eccaf076e142e0a98aa5207f026074_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,214 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205db9713240cb46029ba0ab5afd51bd29_b7ff2ddfd4733260af7cbc9b7e7d2218 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205db9713240cb46029ba0ab5afd51bd29_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,217 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112061fd6724dabe4935a27cea6b44358415_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112061fd6724dabe4935a27cea6b44358415_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,218 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208b345c44b6d942f795de512936090928_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208b345c44b6d942f795de512936090928_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,219 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209395889b4fbb4abf87643fc12a628cce_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209395889b4fbb4abf87643fc12a628cce_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,220 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a261c2623f984176aae81840e0816327_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a261c2623f984176aae81840e0816327_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,221 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a5514cac18df4b4181958694010a07bc_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a5514cac18df4b4181958694010a07bc_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,222 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c42d24657bea4dfeaf03ba9f3b2f3502_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c42d24657bea4dfeaf03ba9f3b2f3502_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,224 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c90e5fb0910b43d590aaccaeb82991c6_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c90e5fb0910b43d590aaccaeb82991c6_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,227 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cdd4aedef1c14b1dba2ed87c76288192_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cdd4aedef1c14b1dba2ed87c76288192_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,229 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d7033bc3486f4b4a8b7daad164887981_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d7033bc3486f4b4a8b7daad164887981_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,230 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e228bd12646648f6839c2eafce555c02_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e228bd12646648f6839c2eafce555c02_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,231 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e26c1589f80042a791cf8f300f02221f_b7ff2ddfd4733260af7cbc9b7e7d2218 to 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e26c1589f80042a791cf8f300f02221f_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,232 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e2c765f6b89f4bcf9d47ba8004c0d449_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e2c765f6b89f4bcf9d47ba8004c0d449_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,234 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e7e92bc1277e4ec3871ed0e343b08c04_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e7e92bc1277e4ec3871ed0e343b08c04_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,235 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f6c5b5447c9248539b20c58fa4fc8cad_b7ff2ddfd4733260af7cbc9b7e7d2218 to hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f6c5b5447c9248539b20c58fa4fc8cad_b7ff2ddfd4733260af7cbc9b7e7d2218 2024-11-20T13:26:46,235 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T13:26:46,238 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=187, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:26:46,240 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T13:26:46,243 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T13:26:46,244 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=187, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:26:46,244 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-20T13:26:46,245 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732109206245"}]},"ts":"9223372036854775807"} 2024-11-20T13:26:46,247 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T13:26:46,247 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => b7ff2ddfd4733260af7cbc9b7e7d2218, NAME => 'TestAcidGuarantees,,1732109171519.b7ff2ddfd4733260af7cbc9b7e7d2218.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T13:26:46,247 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T13:26:46,247 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732109206247"}]},"ts":"9223372036854775807"} 2024-11-20T13:26:46,249 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T13:26:46,251 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=187, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T13:26:46,252 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=187, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 71 msec 2024-11-20T13:26:46,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43647 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-11-20T13:26:46,283 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 187 completed 2024-11-20T13:26:46,296 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=236 (was 239), OpenFileDescriptor=449 (was 450), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=681 (was 742), ProcessCount=9 (was 11), AvailableMemoryMB=5732 (was 395) - AvailableMemoryMB LEAK? 
- 2024-11-20T13:26:46,297 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-20T13:26:46,297 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T13:26:46,297 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x05cd5d49 to 127.0.0.1:53074 2024-11-20T13:26:46,297 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:46,297 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T13:26:46,297 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1657766203, stopped=false 2024-11-20T13:26:46,297 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=5ef453f0fbb6,43647,1732109004684 2024-11-20T13:26:46,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T13:26:46,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T13:26:46,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T13:26:46,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T13:26:46,299 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-20T13:26:46,300 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T13:26:46,300 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T13:26:46,301 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:46,301 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '5ef453f0fbb6,46739,1732109006137' ***** 2024-11-20T13:26:46,301 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-20T13:26:46,301 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T13:26:46,301 INFO [RS:0;5ef453f0fbb6:46739 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T13:26:46,301 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-20T13:26:46,302 INFO [RS:0;5ef453f0fbb6:46739 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-20T13:26:46,302 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(3579): Received CLOSE for 2b8ebee7bfda350373a6614eb33b4fd3 2024-11-20T13:26:46,302 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(1224): stopping server 5ef453f0fbb6,46739,1732109006137 2024-11-20T13:26:46,302 DEBUG [RS:0;5ef453f0fbb6:46739 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T13:26:46,302 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T13:26:46,302 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T13:26:46,302 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T13:26:46,302 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-20T13:26:46,303 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-20T13:26:46,303 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 2b8ebee7bfda350373a6614eb33b4fd3, disabling compactions & flushes 2024-11-20T13:26:46,303 DEBUG [RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 2b8ebee7bfda350373a6614eb33b4fd3=hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3.} 2024-11-20T13:26:46,303 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3. 2024-11-20T13:26:46,303 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3. 2024-11-20T13:26:46,303 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3. after waiting 0 ms 2024-11-20T13:26:46,303 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3. 
2024-11-20T13:26:46,303 DEBUG [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-20T13:26:46,303 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 2b8ebee7bfda350373a6614eb33b4fd3 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-20T13:26:46,303 INFO [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-20T13:26:46,303 DEBUG [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-20T13:26:46,303 DEBUG [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T13:26:46,303 DEBUG [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T13:26:46,303 INFO [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-20T13:26:46,306 DEBUG [RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 2b8ebee7bfda350373a6614eb33b4fd3 2024-11-20T13:26:46,308 INFO [regionserver/5ef453f0fbb6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T13:26:46,330 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/namespace/2b8ebee7bfda350373a6614eb33b4fd3/.tmp/info/d618925b310d4dbd80fd9f13bd155bc1 is 45, key is default/info:d/1732109010483/Put/seqid=0 2024-11-20T13:26:46,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742525_1701 (size=5037) 2024-11-20T13:26:46,335 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/namespace/2b8ebee7bfda350373a6614eb33b4fd3/.tmp/info/d618925b310d4dbd80fd9f13bd155bc1 2024-11-20T13:26:46,339 DEBUG [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740/.tmp/info/bbdaf6186cde4241a10a2bc789f96bc3 is 143, key is hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3./info:regioninfo/1732109010324/Put/seqid=0 2024-11-20T13:26:46,340 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/namespace/2b8ebee7bfda350373a6614eb33b4fd3/.tmp/info/d618925b310d4dbd80fd9f13bd155bc1 as 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/namespace/2b8ebee7bfda350373a6614eb33b4fd3/info/d618925b310d4dbd80fd9f13bd155bc1 2024-11-20T13:26:46,345 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/namespace/2b8ebee7bfda350373a6614eb33b4fd3/info/d618925b310d4dbd80fd9f13bd155bc1, entries=2, sequenceid=6, filesize=4.9 K 2024-11-20T13:26:46,345 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 2b8ebee7bfda350373a6614eb33b4fd3 in 42ms, sequenceid=6, compaction requested=false 2024-11-20T13:26:46,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742526_1702 (size=7725) 2024-11-20T13:26:46,346 INFO [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740/.tmp/info/bbdaf6186cde4241a10a2bc789f96bc3 2024-11-20T13:26:46,372 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/namespace/2b8ebee7bfda350373a6614eb33b4fd3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-20T13:26:46,376 INFO [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3. 2024-11-20T13:26:46,376 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 2b8ebee7bfda350373a6614eb33b4fd3: 2024-11-20T13:26:46,376 DEBUG [RS_CLOSE_REGION-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732109009410.2b8ebee7bfda350373a6614eb33b4fd3. 
2024-11-20T13:26:46,383 DEBUG [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740/.tmp/rep_barrier/d4d9456ea5e14908823fe5e3d89cb7ac is 102, key is TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae./rep_barrier:/1732109038206/DeleteFamily/seqid=0 2024-11-20T13:26:46,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742527_1703 (size=6025) 2024-11-20T13:26:46,391 INFO [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740/.tmp/rep_barrier/d4d9456ea5e14908823fe5e3d89cb7ac 2024-11-20T13:26:46,431 DEBUG [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740/.tmp/table/eda10c3696004175bbaf3dfcebd292ea is 96, key is TestAcidGuarantees,,1732109010721.cbbdc72320da06253b5398d0c51c77ae./table:/1732109038206/DeleteFamily/seqid=0 2024-11-20T13:26:46,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742528_1704 (size=5942) 2024-11-20T13:26:46,507 DEBUG [RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T13:26:46,596 INFO [regionserver/5ef453f0fbb6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T13:26:46,596 INFO [regionserver/5ef453f0fbb6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T13:26:46,707 DEBUG [RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T13:26:46,842 INFO [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740/.tmp/table/eda10c3696004175bbaf3dfcebd292ea 2024-11-20T13:26:46,846 DEBUG [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740/.tmp/info/bbdaf6186cde4241a10a2bc789f96bc3 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740/info/bbdaf6186cde4241a10a2bc789f96bc3 2024-11-20T13:26:46,849 INFO [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740/info/bbdaf6186cde4241a10a2bc789f96bc3, entries=22, sequenceid=93, filesize=7.5 K 2024-11-20T13:26:46,849 DEBUG [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740/.tmp/rep_barrier/d4d9456ea5e14908823fe5e3d89cb7ac as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740/rep_barrier/d4d9456ea5e14908823fe5e3d89cb7ac 2024-11-20T13:26:46,852 INFO [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740/rep_barrier/d4d9456ea5e14908823fe5e3d89cb7ac, entries=6, sequenceid=93, filesize=5.9 K 2024-11-20T13:26:46,853 DEBUG [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740/.tmp/table/eda10c3696004175bbaf3dfcebd292ea as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740/table/eda10c3696004175bbaf3dfcebd292ea 2024-11-20T13:26:46,856 INFO [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740/table/eda10c3696004175bbaf3dfcebd292ea, entries=9, sequenceid=93, filesize=5.8 K 2024-11-20T13:26:46,856 INFO [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 553ms, sequenceid=93, compaction requested=false 2024-11-20T13:26:46,860 DEBUG [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-20T13:26:46,861 DEBUG [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T13:26:46,861 INFO [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-20T13:26:46,861 DEBUG [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-20T13:26:46,861 DEBUG [RS_CLOSE_META-regionserver/5ef453f0fbb6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T13:26:46,907 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(1250): stopping server 5ef453f0fbb6,46739,1732109006137; all regions closed. 
2024-11-20T13:26:46,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741834_1010 (size=26050)
2024-11-20T13:26:46,914 DEBUG [RS:0;5ef453f0fbb6:46739 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/oldWALs
2024-11-20T13:26:46,915 INFO [RS:0;5ef453f0fbb6:46739 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 5ef453f0fbb6%2C46739%2C1732109006137.meta:.meta(num 1732109009114)
2024-11-20T13:26:46,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741833_1009 (size=15907642)
2024-11-20T13:26:46,919 DEBUG [RS:0;5ef453f0fbb6:46739 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/oldWALs
2024-11-20T13:26:46,919 INFO [RS:0;5ef453f0fbb6:46739 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 5ef453f0fbb6%2C46739%2C1732109006137:(num 1732109008780)
2024-11-20T13:26:46,919 DEBUG [RS:0;5ef453f0fbb6:46739 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T13:26:46,919 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.LeaseManager(133): Closed leases
2024-11-20T13:26:46,919 INFO [RS:0;5ef453f0fbb6:46739 {}] hbase.ChoreService(370): Chore service for: regionserver/5ef453f0fbb6:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown
2024-11-20T13:26:46,920 INFO [regionserver/5ef453f0fbb6:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-20T13:26:46,920 INFO [RS:0;5ef453f0fbb6:46739 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:46739
2024-11-20T13:26:46,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5ef453f0fbb6,46739,1732109006137
2024-11-20T13:26:46,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-20T13:26:46,925 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher.
java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$357/0x00007f55c88f3558@1c67b454 rejected from java.util.concurrent.ThreadPoolExecutor@596529bc[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 15]
    at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?]
    at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?]
    at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4]
    at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4]
2024-11-20T13:26:46,926 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ef453f0fbb6,46739,1732109006137]
2024-11-20T13:26:46,926 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 5ef453f0fbb6,46739,1732109006137; numProcessing=1
2024-11-20T13:26:46,928 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/5ef453f0fbb6,46739,1732109006137 already deleted, retry=false
2024-11-20T13:26:46,928 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 5ef453f0fbb6,46739,1732109006137 expired; onlineServers=0
2024-11-20T13:26:46,928 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '5ef453f0fbb6,43647,1732109004684' *****
2024-11-20T13:26:46,928 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-20T13:26:46,928 DEBUG [M:0;5ef453f0fbb6:43647 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c323138, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ef453f0fbb6/172.17.0.2:0
2024-11-20T13:26:46,928 INFO [M:0;5ef453f0fbb6:43647 {}] regionserver.HRegionServer(1224): stopping server 5ef453f0fbb6,43647,1732109004684
2024-11-20T13:26:46,928 INFO [M:0;5ef453f0fbb6:43647 {}] regionserver.HRegionServer(1250): stopping server 5ef453f0fbb6,43647,1732109004684; all regions closed.
2024-11-20T13:26:46,928 DEBUG [M:0;5ef453f0fbb6:43647 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T13:26:46,928 DEBUG [M:0;5ef453f0fbb6:43647 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-20T13:26:46,928 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-20T13:26:46,928 DEBUG [M:0;5ef453f0fbb6:43647 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-20T13:26:46,929 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster-HFileCleaner.small.0-1732109008384 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ef453f0fbb6:0:becomeActiveMaster-HFileCleaner.small.0-1732109008384,5,FailOnTimeoutGroup]
2024-11-20T13:26:46,929 INFO [M:0;5ef453f0fbb6:43647 {}] hbase.ChoreService(370): Chore service for: master/5ef453f0fbb6:0 had [] on shutdown
2024-11-20T13:26:46,929 DEBUG [M:0;5ef453f0fbb6:43647 {}] master.HMaster(1733): Stopping service threads
2024-11-20T13:26:46,929 INFO [M:0;5ef453f0fbb6:43647 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-20T13:26:46,929 ERROR [M:0;5ef453f0fbb6:43647 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT
java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10]
    Thread[IPC Client (21019859) connection to localhost/127.0.0.1:40089 from jenkins,5,PEWorkerGroup]
    Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:40089,5,PEWorkerGroup]
    Thread[HFileArchiver-6,5,PEWorkerGroup]
2024-11-20T13:26:46,930 DEBUG [master/5ef453f0fbb6:0:becomeActiveMaster-HFileCleaner.large.0-1732109008382 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ef453f0fbb6:0:becomeActiveMaster-HFileCleaner.large.0-1732109008382,5,FailOnTimeoutGroup]
2024-11-20T13:26:46,930 INFO [M:0;5ef453f0fbb6:43647 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-20T13:26:46,930 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-20T13:26:46,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-20T13:26:46,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-20T13:26:46,930 DEBUG [M:0;5ef453f0fbb6:43647 {}] zookeeper.ZKUtil(347): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-20T13:26:46,930 WARN [M:0;5ef453f0fbb6:43647 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-20T13:26:46,930 INFO [M:0;5ef453f0fbb6:43647 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-11-20T13:26:46,931 INFO [M:0;5ef453f0fbb6:43647 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-20T13:26:46,931 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-20T13:26:46,931 DEBUG [M:0;5ef453f0fbb6:43647 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-20T13:26:46,931 INFO [M:0;5ef453f0fbb6:43647 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T13:26:46,931 DEBUG [M:0;5ef453f0fbb6:43647 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T13:26:46,931 DEBUG [M:0;5ef453f0fbb6:43647 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-20T13:26:46,931 DEBUG [M:0;5ef453f0fbb6:43647 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T13:26:46,931 INFO [M:0;5ef453f0fbb6:43647 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=810.73 KB heapSize=1000 KB
2024-11-20T13:26:46,954 DEBUG [M:0;5ef453f0fbb6:43647 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bac6ceeaf0654368a4caf14afcfb7b94 is 82, key is hbase:meta,,1/info:regioninfo/1732109009293/Put/seqid=0
2024-11-20T13:26:46,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742529_1705 (size=5672)
2024-11-20T13:26:46,959 INFO [M:0;5ef453f0fbb6:43647 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2337 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bac6ceeaf0654368a4caf14afcfb7b94
2024-11-20T13:26:46,988 DEBUG [M:0;5ef453f0fbb6:43647 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6b95cc112b2e4972a4acda9385ceeb93 is 2285, key is \x00\x00\x00\x00\x00\x00\x00\xA2/proc:d/1732109174537/Put/seqid=0
2024-11-20T13:26:46,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742530_1706 (size=46511)
2024-11-20T13:26:47,000 INFO [M:0;5ef453f0fbb6:43647 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=810.17 KB at sequenceid=2337 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6b95cc112b2e4972a4acda9385ceeb93
2024-11-20T13:26:47,004 INFO [M:0;5ef453f0fbb6:43647 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6b95cc112b2e4972a4acda9385ceeb93
2024-11-20T13:26:47,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T13:26:47,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46739-0x1001519a4e30001, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T13:26:47,027 INFO [RS:0;5ef453f0fbb6:46739 {}] regionserver.HRegionServer(1307): Exiting; stopping=5ef453f0fbb6,46739,1732109006137; zookeeper connection closed.
2024-11-20T13:26:47,028 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@a5fab69 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@a5fab69
2024-11-20T13:26:47,028 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-20T13:26:47,029 DEBUG [M:0;5ef453f0fbb6:43647 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9fbebd1821c94b3db9eec5d16c1ed1e5 is 69, key is 5ef453f0fbb6,46739,1732109006137/rs:state/1732109008521/Put/seqid=0
2024-11-20T13:26:47,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073742531_1707 (size=5156)
2024-11-20T13:26:47,433 INFO [M:0;5ef453f0fbb6:43647 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2337 (bloomFilter=true), to=hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9fbebd1821c94b3db9eec5d16c1ed1e5
2024-11-20T13:26:47,436 DEBUG [M:0;5ef453f0fbb6:43647 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bac6ceeaf0654368a4caf14afcfb7b94 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bac6ceeaf0654368a4caf14afcfb7b94
2024-11-20T13:26:47,439 INFO [M:0;5ef453f0fbb6:43647 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bac6ceeaf0654368a4caf14afcfb7b94, entries=8, sequenceid=2337, filesize=5.5 K
2024-11-20T13:26:47,439 DEBUG [M:0;5ef453f0fbb6:43647 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6b95cc112b2e4972a4acda9385ceeb93 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6b95cc112b2e4972a4acda9385ceeb93
2024-11-20T13:26:47,442 INFO [M:0;5ef453f0fbb6:43647 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6b95cc112b2e4972a4acda9385ceeb93
2024-11-20T13:26:47,442 INFO [M:0;5ef453f0fbb6:43647 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6b95cc112b2e4972a4acda9385ceeb93, entries=187, sequenceid=2337, filesize=45.4 K
2024-11-20T13:26:47,442 DEBUG [M:0;5ef453f0fbb6:43647 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9fbebd1821c94b3db9eec5d16c1ed1e5 as hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9fbebd1821c94b3db9eec5d16c1ed1e5
2024-11-20T13:26:47,444 INFO [M:0;5ef453f0fbb6:43647 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40089/user/jenkins/test-data/30629a19-9e01-8d36-c058-6e655693e2cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9fbebd1821c94b3db9eec5d16c1ed1e5, entries=1, sequenceid=2337, filesize=5.0 K
2024-11-20T13:26:47,445 INFO [M:0;5ef453f0fbb6:43647 {}] regionserver.HRegion(3040): Finished flush of dataSize ~810.73 KB/830184, heapSize ~999.70 KB/1023696, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 514ms, sequenceid=2337, compaction requested=false
2024-11-20T13:26:47,446 INFO [M:0;5ef453f0fbb6:43647 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T13:26:47,446 DEBUG [M:0;5ef453f0fbb6:43647 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-20T13:26:47,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34799 is added to blk_1073741830_1006 (size=983360)
2024-11-20T13:26:47,449 INFO [M:0;5ef453f0fbb6:43647 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-11-20T13:26:47,449 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-20T13:26:47,449 INFO [M:0;5ef453f0fbb6:43647 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:43647
2024-11-20T13:26:47,451 DEBUG [M:0;5ef453f0fbb6:43647 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/5ef453f0fbb6,43647,1732109004684 already deleted, retry=false
2024-11-20T13:26:47,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T13:26:47,552 INFO [M:0;5ef453f0fbb6:43647 {}] regionserver.HRegionServer(1307): Exiting; stopping=5ef453f0fbb6,43647,1732109004684; zookeeper connection closed.
2024-11-20T13:26:47,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43647-0x1001519a4e30000, quorum=127.0.0.1:53074, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T13:26:47,557 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4b45b1ce{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T13:26:47,559 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a4ec7f0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T13:26:47,559 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T13:26:47,560 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ab13c09{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T13:26:47,560 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54e7f8ba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/hadoop.log.dir/,STOPPED}
2024-11-20T13:26:47,560 WARN [BP-1593953133-172.17.0.2-1732108997715 heartbeating to localhost/127.0.0.1:40089 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1593953133-172.17.0.2-1732108997715 (Datanode Uuid 0c8e8961-1a10-417f-bcf5-fd12b33608bf) service to localhost/127.0.0.1:40089
2024-11-20T13:26:47,563 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/cluster_5e1ba8bd-fd63-38a7-797d-d702e73186e3/dfs/data/data1/current/BP-1593953133-172.17.0.2-1732108997715 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T13:26:47,563 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/cluster_5e1ba8bd-fd63-38a7-797d-d702e73186e3/dfs/data/data2/current/BP-1593953133-172.17.0.2-1732108997715 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T13:26:47,563 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T13:26:47,571 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@54429d0a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-20T13:26:47,571 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@49566914{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T13:26:47,572 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T13:26:47,572 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3db7010a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T13:26:47,572 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61767546{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/2187439a-f223-192f-5ce4-f6eeb0d38298/hadoop.log.dir/,STOPPED}
2024-11-20T13:26:47,590 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-11-20T13:26:47,723 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down